ngram
listlengths
0
82k
[ "4, 9] \"\"\" # Time: O(N) Space: O(n) def make_squares(arr):", "4, 4, 9] \"\"\" # Time: O(N) Space: O(n) def", "def make_squares(arr): n = len(arr) squares = [0 for x", "n - 1 while left <= right: leftSquare = arr[left]", "a sorted array, create a new array containing squares of", "n = len(arr) squares = [0 for x in range(n)]", "rightSquare = arr[right] * arr[right] if leftSquare > rightSquare: squares[highestSquareIdx]", "sorted array, create a new array containing squares of all", "array in the sorted order. Input: [-2, -1, 0, 2,", "arr[left] rightSquare = arr[right] * arr[right] if leftSquare > rightSquare:", "* arr[right] if leftSquare > rightSquare: squares[highestSquareIdx] = leftSquare left", "= rightSquare right -= 1 highestSquareIdx -= 1 return squares", "the sorted order. Input: [-2, -1, 0, 2, 3] Output:", "order. Input: [-2, -1, 0, 2, 3] Output: [0, 1,", "9] \"\"\" # Time: O(N) Space: O(n) def make_squares(arr): n", "[0, 1, 4, 4, 9] \"\"\" # Time: O(N) Space:", "1, 4, 4, 9] \"\"\" # Time: O(N) Space: O(n)", "pointers/sortedarr_square.py \"\"\" [E] Given a sorted array, create a new", "# Time: O(N) Space: O(n) def make_squares(arr): n = len(arr)", "3] Output: [0, 1, 4, 4, 9] \"\"\" # Time:", "squares = [0 for x in range(n)] highestSquareIdx = n", "O(n) def make_squares(arr): n = len(arr) squares = [0 for", "> rightSquare: squares[highestSquareIdx] = leftSquare left += 1 else: squares[highestSquareIdx]", "= [0 for x in range(n)] highestSquareIdx = n -", "0, n - 1 while left <= right: leftSquare =", "= len(arr) squares = [0 for x in range(n)] highestSquareIdx", "of all the number of the input array in the", "squares[highestSquareIdx] = leftSquare left += 1 else: squares[highestSquareIdx] = rightSquare", "containing squares of all the number of the input array", "all the number of the input array in the sorted", "if leftSquare > rightSquare: squares[highestSquareIdx] = leftSquare left += 1", "patterns/two pointers/sortedarr_square.py \"\"\" [E] Given a 
sorted array, create a", "in range(n)] highestSquareIdx = n - 1 left, right =", "[-2, -1, 0, 2, 3] Output: [0, 1, 4, 4,", "the number of the input array in the sorted order.", "+= 1 else: squares[highestSquareIdx] = rightSquare right -= 1 highestSquareIdx", "<filename>coding patterns/two pointers/sortedarr_square.py \"\"\" [E] Given a sorted array, create", "= arr[left] * arr[left] rightSquare = arr[right] * arr[right] if", "leftSquare = arr[left] * arr[left] rightSquare = arr[right] * arr[right]", "create a new array containing squares of all the number", "0, 2, 3] Output: [0, 1, 4, 4, 9] \"\"\"", "leftSquare left += 1 else: squares[highestSquareIdx] = rightSquare right -=", "make_squares(arr): n = len(arr) squares = [0 for x in", "\"\"\" [E] Given a sorted array, create a new array", "n - 1 left, right = 0, n - 1", "<= right: leftSquare = arr[left] * arr[left] rightSquare = arr[right]", "squares[highestSquareIdx] = rightSquare right -= 1 highestSquareIdx -= 1 return", "number of the input array in the sorted order. Input:", "rightSquare: squares[highestSquareIdx] = leftSquare left += 1 else: squares[highestSquareIdx] =", "right = 0, n - 1 while left <= right:", "the input array in the sorted order. Input: [-2, -1,", "Given a sorted array, create a new array containing squares", "2, 3] Output: [0, 1, 4, 4, 9] \"\"\" #", "x in range(n)] highestSquareIdx = n - 1 left, right", "of the input array in the sorted order. Input: [-2,", "a new array containing squares of all the number of", "= 0, n - 1 while left <= right: leftSquare", "1 left, right = 0, n - 1 while left", "[0 for x in range(n)] highestSquareIdx = n - 1", "range(n)] highestSquareIdx = n - 1 left, right = 0,", "for x in range(n)] highestSquareIdx = n - 1 left,", "Output: [0, 1, 4, 4, 9] \"\"\" # Time: O(N)", "Space: O(n) def make_squares(arr): n = len(arr) squares = [0", "left <= right: leftSquare = arr[left] * arr[left] rightSquare =", "input array in the sorted order. 
Input: [-2, -1, 0,", "1 while left <= right: leftSquare = arr[left] * arr[left]", "arr[right] if leftSquare > rightSquare: squares[highestSquareIdx] = leftSquare left +=", "- 1 while left <= right: leftSquare = arr[left] *", "= arr[right] * arr[right] if leftSquare > rightSquare: squares[highestSquareIdx] =", "= leftSquare left += 1 else: squares[highestSquareIdx] = rightSquare right", "-1, 0, 2, 3] Output: [0, 1, 4, 4, 9]", "right: leftSquare = arr[left] * arr[left] rightSquare = arr[right] *", "- 1 left, right = 0, n - 1 while", "highestSquareIdx = n - 1 left, right = 0, n", "new array containing squares of all the number of the", "array, create a new array containing squares of all the", "left += 1 else: squares[highestSquareIdx] = rightSquare right -= 1", "Input: [-2, -1, 0, 2, 3] Output: [0, 1, 4,", "Time: O(N) Space: O(n) def make_squares(arr): n = len(arr) squares", "leftSquare > rightSquare: squares[highestSquareIdx] = leftSquare left += 1 else:", "squares of all the number of the input array in", "arr[right] * arr[right] if leftSquare > rightSquare: squares[highestSquareIdx] = leftSquare", "while left <= right: leftSquare = arr[left] * arr[left] rightSquare", "= n - 1 left, right = 0, n -", "array containing squares of all the number of the input", "* arr[left] rightSquare = arr[right] * arr[right] if leftSquare >", "[E] Given a sorted array, create a new array containing", "\"\"\" # Time: O(N) Space: O(n) def make_squares(arr): n =", "left, right = 0, n - 1 while left <=", "in the sorted order. Input: [-2, -1, 0, 2, 3]", "1 else: squares[highestSquareIdx] = rightSquare right -= 1 highestSquareIdx -=", "sorted order. Input: [-2, -1, 0, 2, 3] Output: [0,", "arr[left] * arr[left] rightSquare = arr[right] * arr[right] if leftSquare", "O(N) Space: O(n) def make_squares(arr): n = len(arr) squares =", "len(arr) squares = [0 for x in range(n)] highestSquareIdx =", "else: squares[highestSquareIdx] = rightSquare right -= 1 highestSquareIdx -= 1" ]
[ "= [accuracy_file] outputs_map = { 'accuracy_file': accuracy_file } estimator =", "Model\", estimator=estimator, estimator_entry_script_arguments=[ '--test_dir', test_dir, '--model_dir', model_dir, '--accuracy_file', accuracy_file ],", "the accuracy. :param model_dir: The reference to the directory containing", "to run the step on :type compute_target: ComputeTarget :return: The", "(keys: accuracy_file) :rtype: EstimatorStep, dict ''' accuracy_file = PipelineData( name='accuracy_file',", ":rtype: EstimatorStep, dict ''' accuracy_file = PipelineData( name='accuracy_file', pipeline_output_name='accuracy_file', datastore=test_dir.datastore,", "def evaluate_step(model_dir, test_dir, compute_target): ''' This step evaluates the trained", "is_directory=False) outputs = [accuracy_file] outputs_map = { 'accuracy_file': accuracy_file }", "trained model on the testing data and outputs the accuracy.", "data :type test_dir: DataReference :param compute_target: The compute target to", "containing the testing data :type test_dir: DataReference :param compute_target: The", "} estimator = PyTorch( source_directory=os.path.dirname(os.path.abspath(__file__)), entry_script='evaluate.py', framework_version='1.3', compute_target=compute_target, use_gpu=True) step", "compute target to run the step on :type compute_target: ComputeTarget", "evaluates the trained model on the testing data and outputs", "test_dir: DataReference :param compute_target: The compute target to run the", "to the directory containing the testing data :type test_dir: DataReference", "PipelineData from azureml.pipeline.core import PipelineParameter from azureml.pipeline.steps import EstimatorStep from", "the directory containing the trained model :type model_dir: DataReference :param", "import CondaDependencies from azureml.pipeline.core import PipelineData from azureml.pipeline.core import PipelineParameter", "import EstimatorStep from azureml.train.dnn import PyTorch def evaluate_step(model_dir, test_dir, 
compute_target):", "name=\"Evaluate Model\", estimator=estimator, estimator_entry_script_arguments=[ '--test_dir', test_dir, '--model_dir', model_dir, '--accuracy_file', accuracy_file", "PyTorch( source_directory=os.path.dirname(os.path.abspath(__file__)), entry_script='evaluate.py', framework_version='1.3', compute_target=compute_target, use_gpu=True) step = EstimatorStep( name=\"Evaluate", "RunConfiguration from azureml.core.conda_dependencies import CondaDependencies from azureml.pipeline.core import PipelineData from", "test_dir, compute_target): ''' This step evaluates the trained model on", "= { 'accuracy_file': accuracy_file } estimator = PyTorch( source_directory=os.path.dirname(os.path.abspath(__file__)), entry_script='evaluate.py',", "dict ''' accuracy_file = PipelineData( name='accuracy_file', pipeline_output_name='accuracy_file', datastore=test_dir.datastore, output_mode='mount', is_directory=False)", "EstimatorStep( name=\"Evaluate Model\", estimator=estimator, estimator_entry_script_arguments=[ '--test_dir', test_dir, '--model_dir', model_dir, '--accuracy_file',", "model_dir: The reference to the directory containing the trained model", "compute_target): ''' This step evaluates the trained model on the", "The preprocess step, step outputs dictionary (keys: accuracy_file) :rtype: EstimatorStep,", "step outputs dictionary (keys: accuracy_file) :rtype: EstimatorStep, dict ''' accuracy_file", "accuracy_file = PipelineData( name='accuracy_file', pipeline_output_name='accuracy_file', datastore=test_dir.datastore, output_mode='mount', is_directory=False) outputs =", "outputs_map = { 'accuracy_file': accuracy_file } estimator = PyTorch( source_directory=os.path.dirname(os.path.abspath(__file__)),", "This step evaluates the trained model on the testing data", "CondaDependencies from azureml.pipeline.core import PipelineData from azureml.pipeline.core import PipelineParameter from", "azureml.train.dnn import PyTorch def evaluate_step(model_dir, test_dir, 
compute_target): ''' This step", "estimator_entry_script_arguments=[ '--test_dir', test_dir, '--model_dir', model_dir, '--accuracy_file', accuracy_file ], inputs=[model_dir, test_dir],", "the step on :type compute_target: ComputeTarget :return: The preprocess step,", "The compute target to run the step on :type compute_target:", "to the directory containing the trained model :type model_dir: DataReference", "estimator=estimator, estimator_entry_script_arguments=[ '--test_dir', test_dir, '--model_dir', model_dir, '--accuracy_file', accuracy_file ], inputs=[model_dir,", "preprocess step, step outputs dictionary (keys: accuracy_file) :rtype: EstimatorStep, dict", "estimator = PyTorch( source_directory=os.path.dirname(os.path.abspath(__file__)), entry_script='evaluate.py', framework_version='1.3', compute_target=compute_target, use_gpu=True) step =", "the directory containing the testing data :type test_dir: DataReference :param", ":type test_dir: DataReference :param compute_target: The compute target to run", "from azureml.pipeline.steps import PythonScriptStep from azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies", "outputs dictionary (keys: accuracy_file) :rtype: EstimatorStep, dict ''' accuracy_file =", ":type compute_target: ComputeTarget :return: The preprocess step, step outputs dictionary", "azureml.pipeline.core import PipelineData from azureml.pipeline.core import PipelineParameter from azureml.pipeline.steps import", "= PyTorch( source_directory=os.path.dirname(os.path.abspath(__file__)), entry_script='evaluate.py', framework_version='1.3', compute_target=compute_target, use_gpu=True) step = EstimatorStep(", "model :type model_dir: DataReference :param test_dir: The reference to the", "'--model_dir', model_dir, '--accuracy_file', accuracy_file ], inputs=[model_dir, test_dir], outputs=outputs, compute_target=compute_target, allow_reuse=True)", "''' accuracy_file = PipelineData( name='accuracy_file', 
pipeline_output_name='accuracy_file', datastore=test_dir.datastore, output_mode='mount', is_directory=False) outputs", "from azureml.pipeline.core import PipelineParameter from azureml.pipeline.steps import EstimatorStep from azureml.train.dnn", "PythonScriptStep from azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies import CondaDependencies from", "test_dir, '--model_dir', model_dir, '--accuracy_file', accuracy_file ], inputs=[model_dir, test_dir], outputs=outputs, compute_target=compute_target,", "The reference to the directory containing the trained model :type", "model_dir: DataReference :param test_dir: The reference to the directory containing", "target to run the step on :type compute_target: ComputeTarget :return:", "directory containing the trained model :type model_dir: DataReference :param test_dir:", "accuracy_file } estimator = PyTorch( source_directory=os.path.dirname(os.path.abspath(__file__)), entry_script='evaluate.py', framework_version='1.3', compute_target=compute_target, use_gpu=True)", "outputs the accuracy. 
:param model_dir: The reference to the directory", "accuracy_file) :rtype: EstimatorStep, dict ''' accuracy_file = PipelineData( name='accuracy_file', pipeline_output_name='accuracy_file',", "import PipelineData from azureml.pipeline.core import PipelineParameter from azureml.pipeline.steps import EstimatorStep", "pipeline_output_name='accuracy_file', datastore=test_dir.datastore, output_mode='mount', is_directory=False) outputs = [accuracy_file] outputs_map = {", "{ 'accuracy_file': accuracy_file } estimator = PyTorch( source_directory=os.path.dirname(os.path.abspath(__file__)), entry_script='evaluate.py', framework_version='1.3',", "DataReference :param test_dir: The reference to the directory containing the", "''' This step evaluates the trained model on the testing", "azureml.core.conda_dependencies import CondaDependencies from azureml.pipeline.core import PipelineData from azureml.pipeline.core import", "on the testing data and outputs the accuracy. :param model_dir:", "from azureml.pipeline.core import PipelineData from azureml.pipeline.core import PipelineParameter from azureml.pipeline.steps", ":param compute_target: The compute target to run the step on", "PipelineParameter from azureml.pipeline.steps import EstimatorStep from azureml.train.dnn import PyTorch def", "the testing data and outputs the accuracy. :param model_dir: The", "entry_script='evaluate.py', framework_version='1.3', compute_target=compute_target, use_gpu=True) step = EstimatorStep( name=\"Evaluate Model\", estimator=estimator,", "compute_target=compute_target, use_gpu=True) step = EstimatorStep( name=\"Evaluate Model\", estimator=estimator, estimator_entry_script_arguments=[ '--test_dir',", "azureml.pipeline.steps import PythonScriptStep from azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies import", "step evaluates the trained model on the testing data and", "data and outputs the accuracy. 
:param model_dir: The reference to", "PyTorch def evaluate_step(model_dir, test_dir, compute_target): ''' This step evaluates the", "The reference to the directory containing the testing data :type", ":param test_dir: The reference to the directory containing the testing", "testing data and outputs the accuracy. :param model_dir: The reference", "on :type compute_target: ComputeTarget :return: The preprocess step, step outputs", "= PipelineData( name='accuracy_file', pipeline_output_name='accuracy_file', datastore=test_dir.datastore, output_mode='mount', is_directory=False) outputs = [accuracy_file]", "step = EstimatorStep( name=\"Evaluate Model\", estimator=estimator, estimator_entry_script_arguments=[ '--test_dir', test_dir, '--model_dir',", "reference to the directory containing the testing data :type test_dir:", "the testing data :type test_dir: DataReference :param compute_target: The compute", ":return: The preprocess step, step outputs dictionary (keys: accuracy_file) :rtype:", "name='accuracy_file', pipeline_output_name='accuracy_file', datastore=test_dir.datastore, output_mode='mount', is_directory=False) outputs = [accuracy_file] outputs_map =", "and outputs the accuracy. 
:param model_dir: The reference to the", "compute_target: ComputeTarget :return: The preprocess step, step outputs dictionary (keys:", "directory containing the testing data :type test_dir: DataReference :param compute_target:", "EstimatorStep from azureml.train.dnn import PyTorch def evaluate_step(model_dir, test_dir, compute_target): '''", "containing the trained model :type model_dir: DataReference :param test_dir: The", "from azureml.core.conda_dependencies import CondaDependencies from azureml.pipeline.core import PipelineData from azureml.pipeline.core", "the trained model on the testing data and outputs the", "'--test_dir', test_dir, '--model_dir', model_dir, '--accuracy_file', accuracy_file ], inputs=[model_dir, test_dir], outputs=outputs,", "import PyTorch def evaluate_step(model_dir, test_dir, compute_target): ''' This step evaluates", "model_dir, '--accuracy_file', accuracy_file ], inputs=[model_dir, test_dir], outputs=outputs, compute_target=compute_target, allow_reuse=True) return", "evaluate_step(model_dir, test_dir, compute_target): ''' This step evaluates the trained model", "import RunConfiguration from azureml.core.conda_dependencies import CondaDependencies from azureml.pipeline.core import PipelineData", ":type model_dir: DataReference :param test_dir: The reference to the directory", "azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies import CondaDependencies from azureml.pipeline.core import", "run the step on :type compute_target: ComputeTarget :return: The preprocess", ":param model_dir: The reference to the directory containing the trained", "PipelineData( name='accuracy_file', pipeline_output_name='accuracy_file', datastore=test_dir.datastore, output_mode='mount', is_directory=False) outputs = [accuracy_file] outputs_map", "outputs = [accuracy_file] outputs_map = { 'accuracy_file': accuracy_file } estimator", "accuracy_file ], inputs=[model_dir, test_dir], outputs=outputs, compute_target=compute_target, 
allow_reuse=True) return step, outputs_map", "from azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies import CondaDependencies from azureml.pipeline.core", "import PipelineParameter from azureml.pipeline.steps import EstimatorStep from azureml.train.dnn import PyTorch", "output_mode='mount', is_directory=False) outputs = [accuracy_file] outputs_map = { 'accuracy_file': accuracy_file", "= EstimatorStep( name=\"Evaluate Model\", estimator=estimator, estimator_entry_script_arguments=[ '--test_dir', test_dir, '--model_dir', model_dir,", "'accuracy_file': accuracy_file } estimator = PyTorch( source_directory=os.path.dirname(os.path.abspath(__file__)), entry_script='evaluate.py', framework_version='1.3', compute_target=compute_target,", "test_dir: The reference to the directory containing the testing data", "import PythonScriptStep from azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies import CondaDependencies", "accuracy. :param model_dir: The reference to the directory containing the", "source_directory=os.path.dirname(os.path.abspath(__file__)), entry_script='evaluate.py', framework_version='1.3', compute_target=compute_target, use_gpu=True) step = EstimatorStep( name=\"Evaluate Model\",", "compute_target: The compute target to run the step on :type", "azureml.pipeline.steps import EstimatorStep from azureml.train.dnn import PyTorch def evaluate_step(model_dir, test_dir,", "import os from azureml.pipeline.steps import PythonScriptStep from azureml.core.runconfig import RunConfiguration", "ComputeTarget :return: The preprocess step, step outputs dictionary (keys: accuracy_file)", "from azureml.pipeline.steps import EstimatorStep from azureml.train.dnn import PyTorch def evaluate_step(model_dir,", "framework_version='1.3', compute_target=compute_target, use_gpu=True) step = EstimatorStep( name=\"Evaluate Model\", estimator=estimator, estimator_entry_script_arguments=[", "model on the testing data and 
outputs the accuracy. :param", "dictionary (keys: accuracy_file) :rtype: EstimatorStep, dict ''' accuracy_file = PipelineData(", "reference to the directory containing the trained model :type model_dir:", "the trained model :type model_dir: DataReference :param test_dir: The reference", "azureml.pipeline.core import PipelineParameter from azureml.pipeline.steps import EstimatorStep from azureml.train.dnn import", "datastore=test_dir.datastore, output_mode='mount', is_directory=False) outputs = [accuracy_file] outputs_map = { 'accuracy_file':", "step, step outputs dictionary (keys: accuracy_file) :rtype: EstimatorStep, dict '''", "trained model :type model_dir: DataReference :param test_dir: The reference to", "[accuracy_file] outputs_map = { 'accuracy_file': accuracy_file } estimator = PyTorch(", "DataReference :param compute_target: The compute target to run the step", "os from azureml.pipeline.steps import PythonScriptStep from azureml.core.runconfig import RunConfiguration from", "testing data :type test_dir: DataReference :param compute_target: The compute target", "step on :type compute_target: ComputeTarget :return: The preprocess step, step", "EstimatorStep, dict ''' accuracy_file = PipelineData( name='accuracy_file', pipeline_output_name='accuracy_file', datastore=test_dir.datastore, output_mode='mount',", "from azureml.train.dnn import PyTorch def evaluate_step(model_dir, test_dir, compute_target): ''' This", "use_gpu=True) step = EstimatorStep( name=\"Evaluate Model\", estimator=estimator, estimator_entry_script_arguments=[ '--test_dir', test_dir,", "'--accuracy_file', accuracy_file ], inputs=[model_dir, test_dir], outputs=outputs, compute_target=compute_target, allow_reuse=True) return step," ]
[ "False self.TRAIN_TBLOG_STEP = 60 self.TRAIN_LOG_STEP = 20 self.TRAIN_IMG_LOG = False", "self.TRAIN_DATASET_FULL_RESOLUTION = True self.TEST_GPU_ID = 0 self.TEST_DATASET = 'youtubevos' self.TEST_DATASET_FULL_RESOLUTION", "None self.TEST_CKPT_STEP = None # if \"None\", evaluate the latest", "self.DATA_WORKERS = 4 self.DATA_RANDOMCROP = (465, 465) self.DATA_RANDOMFLIP = 0.5", "'log') self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img') self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log',", "0 self.TEST_DATASET = 'youtubevos' self.TEST_DATASET_FULL_RESOLUTION = False self.TEST_DATASET_SPLIT = ['val']", "cuda is not avalable') if self.TRAIN_GPUS == 0: raise ValueError('config.py:", "self.DATA_RANDOM_REVERSE_SEQ = True self.DATA_DAVIS_REPEAT = 30 self.DATA_CURR_SEQ_LEN = 3 self.DATA_RANDOM_GAP_DAVIS", "= os.path.join(self.DIR_RESULT, 'log', 'tensorboard') self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval') self.DATASETS =", "2 self.TEST_LOCAL_ATROUS_RATE = 1 # dist self.DIST_ENABLE = True self.DIST_BACKEND", "= True self.DIST_BACKEND = \"gloo\" self.DIST_URL = \"file://./sharefile\" self.DIST_START_GPU =", "self.TEST_CKPT_PATH = None self.TEST_CKPT_STEP = None # if \"None\", evaluate", "= 4 self.TEST_GLOBAL_CHUNKS = 4 self.TEST_GLOBAL_ATROUS_RATE = 2 self.TEST_LOCAL_ATROUS_RATE =", "False self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE = 'mobilenet' self.MODEL_MODULE = 'networks.cfbi.cfbi'", "True self.MODEL_REFINE_CHANNELS = 64 # n * 32 self.MODEL_LOW_LEVEL_INPLANES =", "= 1000 self.TRAIN_MAX_KEEP_CKPT = 8 self.TRAIN_RESUME = False self.TRAIN_RESUME_CKPT =", "= 64 self.MODEL_EPSILON = 1e-5 self.MODEL_MATCHING_BACKGROUND = True self.MODEL_GCT_BETA_WD =", "is 0') for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG,", "self.MODEL_SHORTCUT_DIM = 48 self.MODEL_SEMANTIC_EMBEDDING_DIM = 100 self.MODEL_HEAD_EMBEDDING_DIM = 256 self.MODEL_PRE_HEAD_EMBEDDING_DIM", 
"['val'] self.TEST_CKPT_PATH = None self.TEST_CKPT_STEP = None # if \"None\",", "2 self.TRAIN_CLIP_GRAD_NORM = 5. self.TRAIN_SAVE_STEP = 1000 self.TRAIN_MAX_KEEP_CKPT = 8", "self.TEST_WORKERS = 4 self.TEST_GLOBAL_CHUNKS = 4 self.TEST_GLOBAL_ATROUS_RATE = 2 self.TEST_LOCAL_ATROUS_RATE", "self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet' else 24 self.MODEL_RELATED_CHANNELS", "ValueError('config.py: cuda is not avalable') if self.TRAIN_GPUS == 0: raise", "self.TRAIN_GLOBAL_ATROUS_RATE = 1 self.TRAIN_LOCAL_ATROUS_RATE = 1 self.TRAIN_GLOBAL_CHUNKS = 20 self.TRAIN_DATASET_FULL_RESOLUTION", "480 self.DATA_RANDOM_REVERSE_SEQ = True self.DATA_DAVIS_REPEAT = 30 self.DATA_CURR_SEQ_LEN = 3", "self.TEST_MULTISCALE = [1] self.TEST_MIN_SIZE = None self.TEST_MAX_SIZE = 800 *", "= os.path.join(self.DIR_DATA, 'DAVIS') self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train') self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA,", "= os.path.join(self.DIR_RESULT, 'log') self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img') self.DIR_TB_LOG =", "self.MODEL_FREEZE_BACKBONE = False self.TRAIN_TOTAL_STEPS = 100000 self.TRAIN_START_STEP = 0 self.TRAIN_LR", "True self.MODEL_FLOAT16_MATCHING = True self.MODEL_FREEZE_BN = True self.MODEL_FREEZE_BACKBONE = False", "1 self.TRAIN_LOCAL_ATROUS_RATE = 1 self.TRAIN_GLOBAL_CHUNKS = 20 self.TRAIN_DATASET_FULL_RESOLUTION = True", "= False self.TEST_DATASET_SPLIT = ['val'] self.TEST_CKPT_PATH = None self.TEST_CKPT_STEP =", "self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_TBLOG = False self.TRAIN_TBLOG_STEP = 60 self.TRAIN_LOG_STEP", "0.5 self.DATA_MAX_CROP_STEPS = 5 self.DATA_MIN_SCALE_FACTOR = 1. 
self.DATA_MAX_SCALE_FACTOR = 1.3", "= 256 self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64 self.MODEL_GN_GROUPS = 32 self.MODEL_GN_EMB_GROUPS =", "'./' self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets') self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS') self.DIR_YTB", "= os.path.join(self.DIR_DATA, 'YTB/train') self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid') self.DIR_RESULT = os.path.join(self.DIR_ROOT,", "= 1.3 self.DATA_SHORT_EDGE_LEN = 480 self.DATA_RANDOM_REVERSE_SEQ = True self.DATA_DAVIS_REPEAT =", "= False self.TRAIN_TBLOG_STEP = 60 self.TRAIN_LOG_STEP = 20 self.TRAIN_IMG_LOG =", "self.TRAIN_SAVE_STEP = 1000 self.TRAIN_MAX_KEEP_CKPT = 8 self.TRAIN_RESUME = False self.TRAIN_RESUME_CKPT", "os.path.join(self.DIR_DATA, 'DAVIS') self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train') self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid')", "import cv2 import time class Configuration(): def __init__(self): self.EXP_NAME =", "not avalable') if self.TRAIN_GPUS == 0: raise ValueError('config.py: the number", "self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS') self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train') self.DIR_YTB_EVAL =", "True self.DATA_DAVIS_REPEAT = 30 self.DATA_CURR_SEQ_LEN = 3 self.DATA_RANDOM_GAP_DAVIS = 3", "= os.path.join(self.DIR_ROOT, 'datasets') self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS') self.DIR_YTB = os.path.join(self.DIR_DATA,", "True self.PRETRAIN_FULL = False self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE = 'mobilenet'", "= 3 self.PRETRAIN = True self.PRETRAIN_FULL = False self.PRETRAIN_MODEL =", "= 32 self.MODEL_GN_EMB_GROUPS = 25 self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6,", "self.PRETRAIN = True self.PRETRAIN_FULL = False self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE", "= None # if \"None\", evaluate the latest checkpoint. 
self.TEST_FLIP", "= 64 # n * 32 self.MODEL_LOW_LEVEL_INPLANES = 256 if", "if self.TEST_MULTISCALE == [1] else 800 self.TEST_WORKERS = 4 self.TEST_GLOBAL_CHUNKS", "= 100000 self.TRAIN_START_STEP = 0 self.TRAIN_LR = 0.01 self.TRAIN_MOMENTUM =", "import sys import cv2 import time class Configuration(): def __init__(self):", "self.DIST_ENABLE = True self.DIST_BACKEND = \"gloo\" self.DIST_URL = \"file://./sharefile\" self.DIST_START_GPU", "os.path.join(self.DIR_RESULT, 'ckpt') self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log') self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log',", "self.TRAIN_CLIP_GRAD_NORM = 5. self.TRAIN_SAVE_STEP = 1000 self.TRAIN_MAX_KEEP_CKPT = 8 self.TRAIN_RESUME", "= 'mobilenet' self.MODEL_MODULE = 'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE = 16 self.MODEL_ASPP_OUTDIM =", "4, 6, 8, 10, 12] self.MODEL_LOCAL_DOWNSAMPLE = True self.MODEL_REFINE_CHANNELS =", "0.15 self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_CLIP_GRAD_NORM = 5. self.TRAIN_SAVE_STEP", "* 1.3 if self.TEST_MULTISCALE == [1] else 800 self.TEST_WORKERS =", "self.MODEL_GN_EMB_GROUPS = 25 self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10,", "os.path.join(self.DIR_RESULT, 'log', 'img') self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard') self.DIR_EVALUATION =", "self.TRAIN_GPUS == 0: raise ValueError('config.py: the number of GPU is", "'datasets') self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS') self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train') self.DIR_YTB_EVAL", "(465, 465) self.DATA_RANDOMFLIP = 0.5 self.DATA_MAX_CROP_STEPS = 5 self.DATA_MIN_SCALE_FACTOR =", "[1] self.TEST_MIN_SIZE = None self.TEST_MAX_SIZE = 800 * 1.3 if", "path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]: if not", "self.MODEL_REFINE_CHANNELS = 64 # n * 32 self.MODEL_LOW_LEVEL_INPLANES = 256", "# dist self.DIST_ENABLE = True self.DIST_BACKEND = \"gloo\" self.DIST_URL =", "8, 10, 12] 
self.MODEL_LOCAL_DOWNSAMPLE = True self.MODEL_REFINE_CHANNELS = 64 #", "self.TRAIN_WARM_UP_STEPS = 1000 self.TRAIN_WEIGHT_DECAY = 15e-5 self.TRAIN_POWER = 0.9 self.TRAIN_GPUS", "\"file://./sharefile\" self.DIST_START_GPU = 0 self.__check() def __check(self): if not torch.cuda.is_available():", "None self.TRAIN_RESUME_STEP = 0 self.TRAIN_AUTO_RESUME = True self.TRAIN_GLOBAL_ATROUS_RATE = 1", "True self.TRAIN_GLOBAL_ATROUS_RATE = 1 self.TRAIN_LOCAL_ATROUS_RATE = 1 self.TRAIN_GLOBAL_CHUNKS = 20", "True self.TEST_GPU_ID = 0 self.TEST_DATASET = 'youtubevos' self.TEST_DATASET_FULL_RESOLUTION = False", "self.TRAIN_LOCAL_ATROUS_RATE = 1 self.TRAIN_GLOBAL_CHUNKS = 20 self.TRAIN_DATASET_FULL_RESOLUTION = True self.TEST_GPU_ID", "256 if self.MODEL_BACKBONE == 'resnet' else 24 self.MODEL_RELATED_CHANNELS = 64", "self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train') self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid') self.DIR_RESULT =", "= None self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE ==", "'img') self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard') self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval')", "0.9 self.TRAIN_GPUS = 4 self.TRAIN_BATCH_SIZE = 8 self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS", "= './' self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets') self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS')", "= True self.TRAIN_GLOBAL_ATROUS_RATE = 1 self.TRAIN_LOCAL_ATROUS_RATE = 1 self.TRAIN_GLOBAL_CHUNKS =", "= 1. 
self.DATA_MAX_SCALE_FACTOR = 1.3 self.DATA_SHORT_EDGE_LEN = 480 self.DATA_RANDOM_REVERSE_SEQ =", "= ['youtubevos'] self.DATA_WORKERS = 4 self.DATA_RANDOMCROP = (465, 465) self.DATA_RANDOMFLIP", "self.MODEL_MATCHING_BACKGROUND = True self.MODEL_GCT_BETA_WD = True self.MODEL_FLOAT16_MATCHING = True self.MODEL_FREEZE_BN", "self.TRAIN_WEIGHT_DECAY = 15e-5 self.TRAIN_POWER = 0.9 self.TRAIN_GPUS = 4 self.TRAIN_BATCH_SIZE", "self.TRAIN_RESUME = False self.TRAIN_RESUME_CKPT = None self.TRAIN_RESUME_STEP = 0 self.TRAIN_AUTO_RESUME", "'ckpt') self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log') self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img')", "self.DIST_START_GPU = 0 self.__check() def __check(self): if not torch.cuda.is_available(): raise", "= True self.MODEL_FLOAT16_MATCHING = True self.MODEL_FREEZE_BN = True self.MODEL_FREEZE_BACKBONE =", "= True self.MODEL_REFINE_CHANNELS = 64 # n * 32 self.MODEL_LOW_LEVEL_INPLANES", "= 0 self.TRAIN_LR = 0.01 self.TRAIN_MOMENTUM = 0.9 self.TRAIN_COSINE_DECAY =", "= 1000 self.TRAIN_WEIGHT_DECAY = 15e-5 self.TRAIN_POWER = 0.9 self.TRAIN_GPUS =", "'DAVIS') self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train') self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid') self.DIR_RESULT", "self.MODEL_GCT_BETA_WD = True self.MODEL_FLOAT16_MATCHING = True self.MODEL_FREEZE_BN = True self.MODEL_FREEZE_BACKBONE", "= os.path.join(self.DIR_RESULT, 'log', 'img') self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard') self.DIR_EVALUATION", "= 25 self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10, 12]", "6, 8, 10, 12] self.MODEL_LOCAL_DOWNSAMPLE = True self.MODEL_REFINE_CHANNELS = 64", "= 30 self.DATA_CURR_SEQ_LEN = 3 self.DATA_RANDOM_GAP_DAVIS = 3 self.DATA_RANDOM_GAP_YTB =", "the latest checkpoint. 
self.TEST_FLIP = False self.TEST_MULTISCALE = [1] self.TEST_MIN_SIZE", "= os.path.join(self.DIR_RESULT, 'ckpt') self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log') self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT,", "= 2 self.TEST_LOCAL_ATROUS_RATE = 1 # dist self.DIST_ENABLE = True", "self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid') self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME) self.DIR_CKPT", "'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE = 16 self.MODEL_ASPP_OUTDIM = 256 self.MODEL_SHORTCUT_DIM = 48", "= 1 self.TRAIN_GLOBAL_CHUNKS = 20 self.TRAIN_DATASET_FULL_RESOLUTION = True self.TEST_GPU_ID =", "= 15e-5 self.TRAIN_POWER = 0.9 self.TRAIN_GPUS = 4 self.TRAIN_BATCH_SIZE =", "ValueError('config.py: the number of GPU is 0') for path in", "= 5. self.TRAIN_SAVE_STEP = 1000 self.TRAIN_MAX_KEEP_CKPT = 8 self.TRAIN_RESUME =", "os.path.join(self.DIR_RESULT, 'log', 'tensorboard') self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval') self.DATASETS = ['youtubevos']", "raise ValueError('config.py: cuda is not avalable') if self.TRAIN_GPUS == 0:", "self.DIR_ROOT = './' self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets') self.DIR_DAVIS = os.path.join(self.DIR_DATA,", "self.TRAIN_TBLOG = False self.TRAIN_TBLOG_STEP = 60 self.TRAIN_LOG_STEP = 20 self.TRAIN_IMG_LOG", "= 0 self.TEST_DATASET = 'youtubevos' self.TEST_DATASET_FULL_RESOLUTION = False self.TEST_DATASET_SPLIT =", "0 self.__check() def __check(self): if not torch.cuda.is_available(): raise ValueError('config.py: cuda", "64 # n * 32 self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE", "[2, 4, 6, 8, 10, 12] self.MODEL_LOCAL_DOWNSAMPLE = True self.MODEL_REFINE_CHANNELS", "= 0.15 self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_CLIP_GRAD_NORM = 5.", "self.TRAIN_COSINE_DECAY = False self.TRAIN_WARM_UP_STEPS = 1000 self.TRAIN_WEIGHT_DECAY = 15e-5 self.TRAIN_POWER", "self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]: if not os.path.isdir(path): 
os.makedirs(path) cfg = Configuration()", "self.TRAIN_POWER = 0.9 self.TRAIN_GPUS = 4 self.TRAIN_BATCH_SIZE = 8 self.TRAIN_START_SEQ_TRAINING_STEPS", "self.DATA_MAX_SCALE_FACTOR = 1.3 self.DATA_SHORT_EDGE_LEN = 480 self.DATA_RANDOM_REVERSE_SEQ = True self.DATA_DAVIS_REPEAT", "64 self.MODEL_EPSILON = 1e-5 self.MODEL_MATCHING_BACKGROUND = True self.MODEL_GCT_BETA_WD = True", "= os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME) self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt') self.DIR_LOG =", "= 'youtubevos' self.TEST_DATASET_FULL_RESOLUTION = False self.TEST_DATASET_SPLIT = ['val'] self.TEST_CKPT_PATH =", "self.DATA_RANDOM_GAP_DAVIS = 3 self.DATA_RANDOM_GAP_YTB = 3 self.PRETRAIN = True self.PRETRAIN_FULL", "4 self.TRAIN_BATCH_SIZE = 8 self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_TBLOG", "def __check(self): if not torch.cuda.is_available(): raise ValueError('config.py: cuda is not", "self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval') self.DATASETS = ['youtubevos'] self.DATA_WORKERS = 4", "torch import argparse import os import sys import cv2 import", "self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE = 'mobilenet' self.MODEL_MODULE = 'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE", "= 100 self.MODEL_HEAD_EMBEDDING_DIM = 256 self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64 self.MODEL_GN_GROUPS =", "self.TEST_MULTISCALE == [1] else 800 self.TEST_WORKERS = 4 self.TEST_GLOBAL_CHUNKS =", "self.TRAIN_GPUS = 4 self.TRAIN_BATCH_SIZE = 8 self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS /", "True self.MODEL_FREEZE_BACKBONE = False self.TRAIN_TOTAL_STEPS = 100000 self.TRAIN_START_STEP = 0", "self.DATA_RANDOM_GAP_YTB = 3 self.PRETRAIN = True self.PRETRAIN_FULL = False self.PRETRAIN_MODEL", "False self.TRAIN_WARM_UP_STEPS = 1000 self.TRAIN_WEIGHT_DECAY = 15e-5 self.TRAIN_POWER = 0.9", "== 0: raise ValueError('config.py: the number of GPU is 0')", "self.MODEL_BACKBONE == 'resnet' else 24 
self.MODEL_RELATED_CHANNELS = 64 self.MODEL_EPSILON =", "0 self.TRAIN_AUTO_RESUME = True self.TRAIN_GLOBAL_ATROUS_RATE = 1 self.TRAIN_LOCAL_ATROUS_RATE = 1", "60 self.TRAIN_LOG_STEP = 20 self.TRAIN_IMG_LOG = False self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15", "checkpoint. self.TEST_FLIP = False self.TEST_MULTISCALE = [1] self.TEST_MIN_SIZE = None", "self.DATA_MIN_SCALE_FACTOR = 1. self.DATA_MAX_SCALE_FACTOR = 1.3 self.DATA_SHORT_EDGE_LEN = 480 self.DATA_RANDOM_REVERSE_SEQ", "1e-5 self.MODEL_MATCHING_BACKGROUND = True self.MODEL_GCT_BETA_WD = True self.MODEL_FLOAT16_MATCHING = True", "0 self.TRAIN_LR = 0.01 self.TRAIN_MOMENTUM = 0.9 self.TRAIN_COSINE_DECAY = False", "= True self.DATA_DAVIS_REPEAT = 30 self.DATA_CURR_SEQ_LEN = 3 self.DATA_RANDOM_GAP_DAVIS =", "self.PRETRAIN_FULL = False self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE = 'mobilenet' self.MODEL_MODULE", "self.__check() def __check(self): if not torch.cuda.is_available(): raise ValueError('config.py: cuda is", "self.MODEL_LOCAL_DOWNSAMPLE = True self.MODEL_REFINE_CHANNELS = 64 # n * 32", "= 3 self.DATA_RANDOM_GAP_YTB = 3 self.PRETRAIN = True self.PRETRAIN_FULL =", "self.TRAIN_TOTAL_STEPS = 100000 self.TRAIN_START_STEP = 0 self.TRAIN_LR = 0.01 self.TRAIN_MOMENTUM", "25 self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10, 12] self.MODEL_LOCAL_DOWNSAMPLE", "os.path.join(self.DIR_RESULT, 'eval') self.DATASETS = ['youtubevos'] self.DATA_WORKERS = 4 self.DATA_RANDOMCROP =", "256 self.MODEL_SHORTCUT_DIM = 48 self.MODEL_SEMANTIC_EMBEDDING_DIM = 100 self.MODEL_HEAD_EMBEDDING_DIM = 256", "self.TEST_CKPT_STEP = None # if \"None\", evaluate the latest checkpoint.", "[1] else 800 self.TEST_WORKERS = 4 self.TEST_GLOBAL_CHUNKS = 4 self.TEST_GLOBAL_ATROUS_RATE", "self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img') self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard')", "self.DATA_RANDOMFLIP = 0.5 self.DATA_MAX_CROP_STEPS = 5 self.DATA_MIN_SCALE_FACTOR = 1. 
self.DATA_MAX_SCALE_FACTOR", "= \"gloo\" self.DIST_URL = \"file://./sharefile\" self.DIST_START_GPU = 0 self.__check() def", "20 self.TRAIN_IMG_LOG = False self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15 self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS", "= 0.5 self.DATA_MAX_CROP_STEPS = 5 self.DATA_MIN_SCALE_FACTOR = 1. self.DATA_MAX_SCALE_FACTOR =", "Configuration(): def __init__(self): self.EXP_NAME = 'mobilenetv2_cfbi' self.DIR_ROOT = './' self.DIR_DATA", "= [2, 4, 6, 8, 10, 12] self.MODEL_LOCAL_DOWNSAMPLE = True", "800 * 1.3 if self.TEST_MULTISCALE == [1] else 800 self.TEST_WORKERS", "= [1] self.TEST_MIN_SIZE = None self.TEST_MAX_SIZE = 800 * 1.3", "12] self.MODEL_LOCAL_DOWNSAMPLE = True self.MODEL_REFINE_CHANNELS = 64 # n *", "* 32 self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet' else", "= 256 if self.MODEL_BACKBONE == 'resnet' else 24 self.MODEL_RELATED_CHANNELS =", "= 0.01 self.TRAIN_MOMENTUM = 0.9 self.TRAIN_COSINE_DECAY = False self.TRAIN_WARM_UP_STEPS =", "import argparse import os import sys import cv2 import time", "dist self.DIST_ENABLE = True self.DIST_BACKEND = \"gloo\" self.DIST_URL = \"file://./sharefile\"", "1000 self.TRAIN_MAX_KEEP_CKPT = 8 self.TRAIN_RESUME = False self.TRAIN_RESUME_CKPT = None", "self.TEST_FLIP = False self.TEST_MULTISCALE = [1] self.TEST_MIN_SIZE = None self.TEST_MAX_SIZE", "False self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15 self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_CLIP_GRAD_NORM", "'log', 'tensorboard') self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval') self.DATASETS = ['youtubevos'] self.DATA_WORKERS", "= None self.TRAIN_RESUME_STEP = 0 self.TRAIN_AUTO_RESUME = True self.TRAIN_GLOBAL_ATROUS_RATE =", "cv2 import time class Configuration(): def __init__(self): self.EXP_NAME = 'mobilenetv2_cfbi'", "self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10, 12] self.MODEL_LOCAL_DOWNSAMPLE =", "self.DATA_DAVIS_REPEAT = 30 self.DATA_CURR_SEQ_LEN = 3 self.DATA_RANDOM_GAP_DAVIS = 3 
self.DATA_RANDOM_GAP_YTB", "os import sys import cv2 import time class Configuration(): def", "not torch.cuda.is_available(): raise ValueError('config.py: cuda is not avalable') if self.TRAIN_GPUS", "= 48 self.MODEL_SEMANTIC_EMBEDDING_DIM = 100 self.MODEL_HEAD_EMBEDDING_DIM = 256 self.MODEL_PRE_HEAD_EMBEDDING_DIM =", "in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]: if not os.path.isdir(path):", "raise ValueError('config.py: the number of GPU is 0') for path", "self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt') self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log') self.DIR_IMG_LOG =", "8 self.TRAIN_RESUME = False self.TRAIN_RESUME_CKPT = None self.TRAIN_RESUME_STEP = 0", "latest checkpoint. self.TEST_FLIP = False self.TEST_MULTISCALE = [1] self.TEST_MIN_SIZE =", "import time class Configuration(): def __init__(self): self.EXP_NAME = 'mobilenetv2_cfbi' self.DIR_ROOT", "16 self.MODEL_ASPP_OUTDIM = 256 self.MODEL_SHORTCUT_DIM = 48 self.MODEL_SEMANTIC_EMBEDDING_DIM = 100", "self.TEST_DATASET_FULL_RESOLUTION = False self.TEST_DATASET_SPLIT = ['val'] self.TEST_CKPT_PATH = None self.TEST_CKPT_STEP", "self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_CLIP_GRAD_NORM = 5. 
self.TRAIN_SAVE_STEP = 1000 self.TRAIN_MAX_KEEP_CKPT", "= 0 self.TRAIN_AUTO_RESUME = True self.TRAIN_GLOBAL_ATROUS_RATE = 1 self.TRAIN_LOCAL_ATROUS_RATE =", "= 'mobilenetv2_cfbi' self.DIR_ROOT = './' self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets') self.DIR_DAVIS", "= None self.TEST_CKPT_STEP = None # if \"None\", evaluate the", "= True self.MODEL_FREEZE_BACKBONE = False self.TRAIN_TOTAL_STEPS = 100000 self.TRAIN_START_STEP =", "self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets') self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS') self.DIR_YTB =", "the number of GPU is 0') for path in [self.DIR_RESULT,", "n * 32 self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet'", "of GPU is 0') for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG,", "self.MODEL_SEMANTIC_EMBEDDING_DIM = 100 self.MODEL_HEAD_EMBEDDING_DIM = 256 self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64 self.MODEL_GN_GROUPS", "1.3 self.DATA_SHORT_EDGE_LEN = 480 self.DATA_RANDOM_REVERSE_SEQ = True self.DATA_DAVIS_REPEAT = 30", "os.path.join(self.DIR_DATA, 'YTB/valid') self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME) self.DIR_CKPT = os.path.join(self.DIR_RESULT,", "self.DATA_SHORT_EDGE_LEN = 480 self.DATA_RANDOM_REVERSE_SEQ = True self.DATA_DAVIS_REPEAT = 30 self.DATA_CURR_SEQ_LEN", "= 3 self.DATA_RANDOM_GAP_DAVIS = 3 self.DATA_RANDOM_GAP_YTB = 3 self.PRETRAIN =", "def __init__(self): self.EXP_NAME = 'mobilenetv2_cfbi' self.DIR_ROOT = './' self.DIR_DATA =", "= 0.9 self.TRAIN_COSINE_DECAY = False self.TRAIN_WARM_UP_STEPS = 1000 self.TRAIN_WEIGHT_DECAY =", "1. 
self.DATA_MAX_SCALE_FACTOR = 1.3 self.DATA_SHORT_EDGE_LEN = 480 self.DATA_RANDOM_REVERSE_SEQ = True", "256 self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64 self.MODEL_GN_GROUPS = 32 self.MODEL_GN_EMB_GROUPS = 25", "self.TEST_MIN_SIZE = None self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE", "torch.cuda.is_available(): raise ValueError('config.py: cuda is not avalable') if self.TRAIN_GPUS ==", "= os.path.join(self.DIR_DATA, 'YTB/valid') self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME) self.DIR_CKPT =", "'YTB/valid') self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME) self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt')", "self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log') self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img') self.DIR_TB_LOG", "self.MODEL_EPSILON = 1e-5 self.MODEL_MATCHING_BACKGROUND = True self.MODEL_GCT_BETA_WD = True self.MODEL_FLOAT16_MATCHING", "os.path.join(self.DIR_ROOT, 'datasets') self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS') self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train')", "else 800 self.TEST_WORKERS = 4 self.TEST_GLOBAL_CHUNKS = 4 self.TEST_GLOBAL_ATROUS_RATE =", "__init__(self): self.EXP_NAME = 'mobilenetv2_cfbi' self.DIR_ROOT = './' self.DIR_DATA = os.path.join(self.DIR_ROOT,", "= 8 self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_TBLOG = False", "== [1] else 800 self.TEST_WORKERS = 4 self.TEST_GLOBAL_CHUNKS = 4", "= (465, 465) self.DATA_RANDOMFLIP = 0.5 self.DATA_MAX_CROP_STEPS = 5 self.DATA_MIN_SCALE_FACTOR", "= False self.TEST_MULTISCALE = [1] self.TEST_MIN_SIZE = None self.TEST_MAX_SIZE =", "self.TRAIN_TBLOG_STEP = 60 self.TRAIN_LOG_STEP = 20 self.TRAIN_IMG_LOG = False self.TRAIN_TOP_K_PERCENT_PIXELS", "= False self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15 self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2", "is not avalable') if self.TRAIN_GPUS == 0: raise ValueError('config.py: the", "'mobilenetv2_cfbi' self.DIR_ROOT = './' self.DIR_DATA = 
os.path.join(self.DIR_ROOT, 'datasets') self.DIR_DAVIS =", "self.TRAIN_START_STEP = 0 self.TRAIN_LR = 0.01 self.TRAIN_MOMENTUM = 0.9 self.TRAIN_COSINE_DECAY", "30 self.DATA_CURR_SEQ_LEN = 3 self.DATA_RANDOM_GAP_DAVIS = 3 self.DATA_RANDOM_GAP_YTB = 3", "4 self.DATA_RANDOMCROP = (465, 465) self.DATA_RANDOMFLIP = 0.5 self.DATA_MAX_CROP_STEPS =", "= 5 self.DATA_MIN_SCALE_FACTOR = 1. self.DATA_MAX_SCALE_FACTOR = 1.3 self.DATA_SHORT_EDGE_LEN =", "self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard') self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval') self.DATASETS", "__check(self): if not torch.cuda.is_available(): raise ValueError('config.py: cuda is not avalable')", "3 self.DATA_RANDOM_GAP_DAVIS = 3 self.DATA_RANDOM_GAP_YTB = 3 self.PRETRAIN = True", "= ['val'] self.TEST_CKPT_PATH = None self.TEST_CKPT_STEP = None # if", "self.TRAIN_RESUME_STEP = 0 self.TRAIN_AUTO_RESUME = True self.TRAIN_GLOBAL_ATROUS_RATE = 1 self.TRAIN_LOCAL_ATROUS_RATE", "48 self.MODEL_SEMANTIC_EMBEDDING_DIM = 100 self.MODEL_HEAD_EMBEDDING_DIM = 256 self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64", "8 self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_TBLOG = False self.TRAIN_TBLOG_STEP", "1.3 if self.TEST_MULTISCALE == [1] else 800 self.TEST_WORKERS = 4", "True self.DIST_BACKEND = \"gloo\" self.DIST_URL = \"file://./sharefile\" self.DIST_START_GPU = 0", "# n * 32 self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE ==", "self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME) self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt') self.DIR_LOG", "= False self.TRAIN_WARM_UP_STEPS = 1000 self.TRAIN_WEIGHT_DECAY = 15e-5 self.TRAIN_POWER =", "= self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_CLIP_GRAD_NORM = 5. 
self.TRAIN_SAVE_STEP = 1000", "self.TEST_GLOBAL_CHUNKS = 4 self.TEST_GLOBAL_ATROUS_RATE = 2 self.TEST_LOCAL_ATROUS_RATE = 1 #", "'eval') self.DATASETS = ['youtubevos'] self.DATA_WORKERS = 4 self.DATA_RANDOMCROP = (465,", "self.DATA_MAX_CROP_STEPS = 5 self.DATA_MIN_SCALE_FACTOR = 1. self.DATA_MAX_SCALE_FACTOR = 1.3 self.DATA_SHORT_EDGE_LEN", "0.01 self.TRAIN_MOMENTUM = 0.9 self.TRAIN_COSINE_DECAY = False self.TRAIN_WARM_UP_STEPS = 1000", "3 self.DATA_RANDOM_GAP_YTB = 3 self.PRETRAIN = True self.PRETRAIN_FULL = False", "/ 2 self.TRAIN_CLIP_GRAD_NORM = 5. self.TRAIN_SAVE_STEP = 1000 self.TRAIN_MAX_KEEP_CKPT =", "= True self.MODEL_GCT_BETA_WD = True self.MODEL_FLOAT16_MATCHING = True self.MODEL_FREEZE_BN =", "0: raise ValueError('config.py: the number of GPU is 0') for", "avalable') if self.TRAIN_GPUS == 0: raise ValueError('config.py: the number of", "= 16 self.MODEL_ASPP_OUTDIM = 256 self.MODEL_SHORTCUT_DIM = 48 self.MODEL_SEMANTIC_EMBEDDING_DIM =", "self.TEST_GLOBAL_ATROUS_RATE = 2 self.TEST_LOCAL_ATROUS_RATE = 1 # dist self.DIST_ENABLE =", "5 self.DATA_MIN_SCALE_FACTOR = 1. 
self.DATA_MAX_SCALE_FACTOR = 1.3 self.DATA_SHORT_EDGE_LEN = 480", "2 self.TRAIN_TBLOG = False self.TRAIN_TBLOG_STEP = 60 self.TRAIN_LOG_STEP = 20", "= 1e-5 self.MODEL_MATCHING_BACKGROUND = True self.MODEL_GCT_BETA_WD = True self.MODEL_FLOAT16_MATCHING =", "= 480 self.DATA_RANDOM_REVERSE_SEQ = True self.DATA_DAVIS_REPEAT = 30 self.DATA_CURR_SEQ_LEN =", "= 256 self.MODEL_SHORTCUT_DIM = 48 self.MODEL_SEMANTIC_EMBEDDING_DIM = 100 self.MODEL_HEAD_EMBEDDING_DIM =", "= './pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE = 'mobilenet' self.MODEL_MODULE = 'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE =", "else 24 self.MODEL_RELATED_CHANNELS = 64 self.MODEL_EPSILON = 1e-5 self.MODEL_MATCHING_BACKGROUND =", "20 self.TRAIN_DATASET_FULL_RESOLUTION = True self.TEST_GPU_ID = 0 self.TEST_DATASET = 'youtubevos'", "1 # dist self.DIST_ENABLE = True self.DIST_BACKEND = \"gloo\" self.DIST_URL", "64 self.MODEL_GN_GROUPS = 32 self.MODEL_GN_EMB_GROUPS = 25 self.MODEL_MULTI_LOCAL_DISTANCE = [2,", "False self.TEST_DATASET_SPLIT = ['val'] self.TEST_CKPT_PATH = None self.TEST_CKPT_STEP = None", "'result', self.EXP_NAME) self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt') self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log')", "False self.TRAIN_TOTAL_STEPS = 100000 self.TRAIN_START_STEP = 0 self.TRAIN_LR = 0.01", "4 self.TEST_GLOBAL_ATROUS_RATE = 2 self.TEST_LOCAL_ATROUS_RATE = 1 # dist self.DIST_ENABLE", "= False self.TRAIN_TOTAL_STEPS = 100000 self.TRAIN_START_STEP = 0 self.TRAIN_LR =", "1 self.TRAIN_GLOBAL_CHUNKS = 20 self.TRAIN_DATASET_FULL_RESOLUTION = True self.TEST_GPU_ID = 0", "self.MODEL_BACKBONE = 'mobilenet' self.MODEL_MODULE = 'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE = 16 self.MODEL_ASPP_OUTDIM", "self.DIST_URL = \"file://./sharefile\" self.DIST_START_GPU = 0 self.__check() def __check(self): if", "self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15 self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_CLIP_GRAD_NORM =", "= \"file://./sharefile\" 
self.DIST_START_GPU = 0 self.__check() def __check(self): if not", "self.MODEL_ASPP_OUTDIM = 256 self.MODEL_SHORTCUT_DIM = 48 self.MODEL_SEMANTIC_EMBEDDING_DIM = 100 self.MODEL_HEAD_EMBEDDING_DIM", "self.TRAIN_LOG_STEP = 20 self.TRAIN_IMG_LOG = False self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15 self.TRAIN_HARD_MINING_STEP", "100 self.MODEL_HEAD_EMBEDDING_DIM = 256 self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64 self.MODEL_GN_GROUPS = 32", "self.DIST_BACKEND = \"gloo\" self.DIST_URL = \"file://./sharefile\" self.DIST_START_GPU = 0 self.__check()", "None self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE == [1]", "'./pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE = 'mobilenet' self.MODEL_MODULE = 'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE = 16", "= 4 self.DATA_RANDOMCROP = (465, 465) self.DATA_RANDOMFLIP = 0.5 self.DATA_MAX_CROP_STEPS", "800 self.TEST_WORKERS = 4 self.TEST_GLOBAL_CHUNKS = 4 self.TEST_GLOBAL_ATROUS_RATE = 2", "number of GPU is 0') for path in [self.DIR_RESULT, self.DIR_CKPT,", "self.EXP_NAME = 'mobilenetv2_cfbi' self.DIR_ROOT = './' self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets')", "self.MODEL_RELATED_CHANNELS = 64 self.MODEL_EPSILON = 1e-5 self.MODEL_MATCHING_BACKGROUND = True self.MODEL_GCT_BETA_WD", "self.TEST_DATASET = 'youtubevos' self.TEST_DATASET_FULL_RESOLUTION = False self.TEST_DATASET_SPLIT = ['val'] self.TEST_CKPT_PATH", "32 self.MODEL_GN_EMB_GROUPS = 25 self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8,", "== 'resnet' else 24 self.MODEL_RELATED_CHANNELS = 64 self.MODEL_EPSILON = 1e-5", "= 1 # dist self.DIST_ENABLE = True self.DIST_BACKEND = \"gloo\"", "= os.path.join(self.DIR_RESULT, 'eval') self.DATASETS = ['youtubevos'] self.DATA_WORKERS = 4 self.DATA_RANDOMCROP", "True self.MODEL_GCT_BETA_WD = True self.MODEL_FLOAT16_MATCHING = True self.MODEL_FREEZE_BN = True", "\"None\", evaluate the latest checkpoint. 
self.TEST_FLIP = False self.TEST_MULTISCALE =", "self.TEST_DATASET_SPLIT = ['val'] self.TEST_CKPT_PATH = None self.TEST_CKPT_STEP = None #", "= False self.TRAIN_RESUME_CKPT = None self.TRAIN_RESUME_STEP = 0 self.TRAIN_AUTO_RESUME =", "os.path.join(self.DIR_RESULT, 'log') self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img') self.DIR_TB_LOG = os.path.join(self.DIR_RESULT,", "self.EXP_NAME) self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt') self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log') self.DIR_IMG_LOG", "self.TRAIN_AUTO_RESUME = True self.TRAIN_GLOBAL_ATROUS_RATE = 1 self.TRAIN_LOCAL_ATROUS_RATE = 1 self.TRAIN_GLOBAL_CHUNKS", "15e-5 self.TRAIN_POWER = 0.9 self.TRAIN_GPUS = 4 self.TRAIN_BATCH_SIZE = 8", "self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE == [1] else", "import torch import argparse import os import sys import cv2", "3 self.PRETRAIN = True self.PRETRAIN_FULL = False self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar'", "'youtubevos' self.TEST_DATASET_FULL_RESOLUTION = False self.TEST_DATASET_SPLIT = ['val'] self.TEST_CKPT_PATH = None", "argparse import os import sys import cv2 import time class", "if not torch.cuda.is_available(): raise ValueError('config.py: cuda is not avalable') if", "self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_CLIP_GRAD_NORM = 5. 
self.TRAIN_SAVE_STEP =", "= 'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE = 16 self.MODEL_ASPP_OUTDIM = 256 self.MODEL_SHORTCUT_DIM =", "self.TRAIN_IMG_LOG = False self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15 self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS /", "import os import sys import cv2 import time class Configuration():", "self.DATA_RANDOMCROP = (465, 465) self.DATA_RANDOMFLIP = 0.5 self.DATA_MAX_CROP_STEPS = 5", "'resnet' else 24 self.MODEL_RELATED_CHANNELS = 64 self.MODEL_EPSILON = 1e-5 self.MODEL_MATCHING_BACKGROUND", "= 64 self.MODEL_GN_GROUPS = 32 self.MODEL_GN_EMB_GROUPS = 25 self.MODEL_MULTI_LOCAL_DISTANCE =", "self.TRAIN_GLOBAL_CHUNKS = 20 self.TRAIN_DATASET_FULL_RESOLUTION = True self.TEST_GPU_ID = 0 self.TEST_DATASET", "= 4 self.TRAIN_BATCH_SIZE = 8 self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2", "= 60 self.TRAIN_LOG_STEP = 20 self.TRAIN_IMG_LOG = False self.TRAIN_TOP_K_PERCENT_PIXELS =", "GPU is 0') for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION,", "self.MODEL_HEAD_EMBEDDING_DIM = 256 self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64 self.MODEL_GN_GROUPS = 32 self.MODEL_GN_EMB_GROUPS", "self.DATASETS = ['youtubevos'] self.DATA_WORKERS = 4 self.DATA_RANDOMCROP = (465, 465)", "1000 self.TRAIN_WEIGHT_DECAY = 15e-5 self.TRAIN_POWER = 0.9 self.TRAIN_GPUS = 4", "self.MODEL_GN_GROUPS = 32 self.MODEL_GN_EMB_GROUPS = 25 self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4,", "# if \"None\", evaluate the latest checkpoint. 
self.TEST_FLIP = False", "[self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]: if not os.path.isdir(path): os.makedirs(path)", "False self.TRAIN_RESUME_CKPT = None self.TRAIN_RESUME_STEP = 0 self.TRAIN_AUTO_RESUME = True", "self.TRAIN_MAX_KEEP_CKPT = 8 self.TRAIN_RESUME = False self.TRAIN_RESUME_CKPT = None self.TRAIN_RESUME_STEP", "self.MODEL_FREEZE_BN = True self.MODEL_FREEZE_BACKBONE = False self.TRAIN_TOTAL_STEPS = 100000 self.TRAIN_START_STEP", "0') for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]:", "time class Configuration(): def __init__(self): self.EXP_NAME = 'mobilenetv2_cfbi' self.DIR_ROOT =", "= 20 self.TRAIN_IMG_LOG = False self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15 self.TRAIN_HARD_MINING_STEP =", "self.TEST_LOCAL_ATROUS_RATE = 1 # dist self.DIST_ENABLE = True self.DIST_BACKEND =", "32 self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet' else 24", "= 800 * 1.3 if self.TEST_MULTISCALE == [1] else 800", "for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]: if", "self.TEST_GPU_ID = 0 self.TEST_DATASET = 'youtubevos' self.TEST_DATASET_FULL_RESOLUTION = False self.TEST_DATASET_SPLIT", "if self.MODEL_BACKBONE == 'resnet' else 24 self.MODEL_RELATED_CHANNELS = 64 self.MODEL_EPSILON", "self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]: if not os.path.isdir(path): os.makedirs(path) cfg =", "465) self.DATA_RANDOMFLIP = 0.5 self.DATA_MAX_CROP_STEPS = 5 self.DATA_MIN_SCALE_FACTOR = 1.", "True self.MODEL_FREEZE_BN = True self.MODEL_FREEZE_BACKBONE = False self.TRAIN_TOTAL_STEPS = 100000", "self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]: if not os.path.isdir(path): os.makedirs(path) cfg", "evaluate the latest checkpoint. 
self.TEST_FLIP = False self.TEST_MULTISCALE = [1]", "= 8 self.TRAIN_RESUME = False self.TRAIN_RESUME_CKPT = None self.TRAIN_RESUME_STEP =", "self.TRAIN_RESUME_CKPT = None self.TRAIN_RESUME_STEP = 0 self.TRAIN_AUTO_RESUME = True self.TRAIN_GLOBAL_ATROUS_RATE", "5. self.TRAIN_SAVE_STEP = 1000 self.TRAIN_MAX_KEEP_CKPT = 8 self.TRAIN_RESUME = False", "= 0 self.__check() def __check(self): if not torch.cuda.is_available(): raise ValueError('config.py:", "None # if \"None\", evaluate the latest checkpoint. self.TEST_FLIP =", "= 20 self.TRAIN_DATASET_FULL_RESOLUTION = True self.TEST_GPU_ID = 0 self.TEST_DATASET =", "self.TRAIN_BATCH_SIZE = 8 self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_TBLOG =", "self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64 self.MODEL_GN_GROUPS = 32 self.MODEL_GN_EMB_GROUPS = 25 self.MODEL_MULTI_LOCAL_DISTANCE", "self.DATA_CURR_SEQ_LEN = 3 self.DATA_RANDOM_GAP_DAVIS = 3 self.DATA_RANDOM_GAP_YTB = 3 self.PRETRAIN", "self.MODEL_OUTPUT_STRIDE = 16 self.MODEL_ASPP_OUTDIM = 256 self.MODEL_SHORTCUT_DIM = 48 self.MODEL_SEMANTIC_EMBEDDING_DIM", "24 self.MODEL_RELATED_CHANNELS = 64 self.MODEL_EPSILON = 1e-5 self.MODEL_MATCHING_BACKGROUND = True", "100000 self.TRAIN_START_STEP = 0 self.TRAIN_LR = 0.01 self.TRAIN_MOMENTUM = 0.9", "= False self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE = 'mobilenet' self.MODEL_MODULE =", "= 1 self.TRAIN_LOCAL_ATROUS_RATE = 1 self.TRAIN_GLOBAL_CHUNKS = 20 self.TRAIN_DATASET_FULL_RESOLUTION =", "if self.TRAIN_GPUS == 0: raise ValueError('config.py: the number of GPU", "self.MODEL_MODULE = 'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE = 16 self.MODEL_ASPP_OUTDIM = 256 self.MODEL_SHORTCUT_DIM", "10, 12] self.MODEL_LOCAL_DOWNSAMPLE = True self.MODEL_REFINE_CHANNELS = 64 # n", "if \"None\", evaluate the latest checkpoint. 
self.TEST_FLIP = False self.TEST_MULTISCALE", "'tensorboard') self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval') self.DATASETS = ['youtubevos'] self.DATA_WORKERS =", "= 4 self.TEST_GLOBAL_ATROUS_RATE = 2 self.TEST_LOCAL_ATROUS_RATE = 1 # dist", "= True self.TEST_GPU_ID = 0 self.TEST_DATASET = 'youtubevos' self.TEST_DATASET_FULL_RESOLUTION =", "= self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_TBLOG = False self.TRAIN_TBLOG_STEP = 60", "= True self.MODEL_FREEZE_BN = True self.MODEL_FREEZE_BACKBONE = False self.TRAIN_TOTAL_STEPS =", "self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_TBLOG = False self.TRAIN_TBLOG_STEP =", "'log', 'img') self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard') self.DIR_EVALUATION = os.path.join(self.DIR_RESULT,", "= True self.PRETRAIN_FULL = False self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE =", "/ 2 self.TRAIN_TBLOG = False self.TRAIN_TBLOG_STEP = 60 self.TRAIN_LOG_STEP =", "class Configuration(): def __init__(self): self.EXP_NAME = 'mobilenetv2_cfbi' self.DIR_ROOT = './'", "False self.TEST_MULTISCALE = [1] self.TEST_MIN_SIZE = None self.TEST_MAX_SIZE = 800", "= 0.9 self.TRAIN_GPUS = 4 self.TRAIN_BATCH_SIZE = 8 self.TRAIN_START_SEQ_TRAINING_STEPS =", "self.MODEL_FLOAT16_MATCHING = True self.MODEL_FREEZE_BN = True self.MODEL_FREEZE_BACKBONE = False self.TRAIN_TOTAL_STEPS", "'YTB/train') self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid') self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME)", "os.path.join(self.DIR_DATA, 'YTB/train') self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid') self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result',", "sys import cv2 import time class Configuration(): def __init__(self): self.EXP_NAME", "['youtubevos'] self.DATA_WORKERS = 4 self.DATA_RANDOMCROP = (465, 465) self.DATA_RANDOMFLIP =", "self.TRAIN_LR = 0.01 self.TRAIN_MOMENTUM = 0.9 self.TRAIN_COSINE_DECAY = False 
self.TRAIN_WARM_UP_STEPS", "4 self.TEST_GLOBAL_CHUNKS = 4 self.TEST_GLOBAL_ATROUS_RATE = 2 self.TEST_LOCAL_ATROUS_RATE = 1", "\"gloo\" self.DIST_URL = \"file://./sharefile\" self.DIST_START_GPU = 0 self.__check() def __check(self):", "'mobilenet' self.MODEL_MODULE = 'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE = 16 self.MODEL_ASPP_OUTDIM = 256", "0.9 self.TRAIN_COSINE_DECAY = False self.TRAIN_WARM_UP_STEPS = 1000 self.TRAIN_WEIGHT_DECAY = 15e-5", "os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME) self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt') self.DIR_LOG = os.path.join(self.DIR_RESULT,", "self.TRAIN_MOMENTUM = 0.9 self.TRAIN_COSINE_DECAY = False self.TRAIN_WARM_UP_STEPS = 1000 self.TRAIN_WEIGHT_DECAY" ]
[ "Content-type header. headers = { \"Content-type\": \"application/x-www-form-urlencoded\" } conn =", "POST request and encode them in # a URL-safe format.", "a URL-safe format. params = urllib.urlencode([ #('js_code', sys.argv[1]), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'),", "Define the parameters for the POST request and encode them", "'warnings'), ]) # Always use the following value for the", "{ \"Content-type\": \"application/x-www-form-urlencoded\" } conn = httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile', params,", "\"Content-type\": \"application/x-www-form-urlencoded\" } conn = httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile', params, headers)", "'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level', 'ADVANCED_OPTIMIZATIONS'), ('output_format', 'text'), ('output_info',", "for the POST request and encode them in # a", "headers = { \"Content-type\": \"application/x-www-form-urlencoded\" } conn = httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST',", "and encode them in # a URL-safe format. 
params =", "'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level', 'ADVANCED_OPTIMIZATIONS'), ('output_format', 'text'), ('output_info', 'warnings'), ])", "sys.argv[1]), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level', 'ADVANCED_OPTIMIZATIONS'), ('output_format',", "'text'), ('output_info', 'warnings'), ]) # Always use the following value", "urllib, sys # Define the parameters for the POST request", "following value for the Content-type header. headers = { \"Content-type\":", "<gh_stars>1-10 #!/usr/bin/python2.4 import httplib, urllib, sys # Define the parameters", "= urllib.urlencode([ #('js_code', sys.argv[1]), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'),", "conn = httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile', params, headers) response = conn.getresponse()", "# Define the parameters for the POST request and encode", "# Always use the following value for the Content-type header.", "('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level', 'ADVANCED_OPTIMIZATIONS'), ('output_format', 'text'), ('output_info', 'warnings'), ]) #", "urllib.urlencode([ #('js_code', sys.argv[1]), ('code_url', 
'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level',", "('compilation_level', 'ADVANCED_OPTIMIZATIONS'), ('output_format', 'text'), ('output_info', 'warnings'), ]) # Always use", "('output_info', 'warnings'), ]) # Always use the following value for", "the following value for the Content-type header. headers = {", "\"application/x-www-form-urlencoded\" } conn = httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile', params, headers) response", "#!/usr/bin/python2.4 import httplib, urllib, sys # Define the parameters for", "'/compile', params, headers) response = conn.getresponse() data = response.read() print", "import httplib, urllib, sys # Define the parameters for the", "('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level', 'ADVANCED_OPTIMIZATIONS'), ('output_format', 'text'),", "= httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile', params, headers) response = conn.getresponse() data", "request and encode them in # a URL-safe format. params", "encode them in # a URL-safe format. params = urllib.urlencode([", "conn.request('POST', '/compile', params, headers) response = conn.getresponse() data = response.read()", "httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile', params, headers) response = conn.getresponse() data =", "header. 
headers = { \"Content-type\": \"application/x-www-form-urlencoded\" } conn = httplib.HTTPConnection('closure-compiler.appspot.com')", "in # a URL-safe format. params = urllib.urlencode([ #('js_code', sys.argv[1]),", "for the Content-type header. headers = { \"Content-type\": \"application/x-www-form-urlencoded\" }", "sys # Define the parameters for the POST request and", "parameters for the POST request and encode them in #", "format. params = urllib.urlencode([ #('js_code', sys.argv[1]), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'),", "('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level', 'ADVANCED_OPTIMIZATIONS'), ('output_format', 'text'), ('output_info', 'warnings'),", "params = urllib.urlencode([ #('js_code', sys.argv[1]), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url',", "# a URL-safe format. params = urllib.urlencode([ #('js_code', sys.argv[1]), ('code_url',", "'ADVANCED_OPTIMIZATIONS'), ('output_format', 'text'), ('output_info', 'warnings'), ]) # Always use the", "('output_format', 'text'), ('output_info', 'warnings'), ]) # Always use the following", "Always use the following value for the Content-type header. headers", "use the following value for the Content-type header. headers =", "URL-safe format. params = urllib.urlencode([ #('js_code', sys.argv[1]), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url',", "them in # a URL-safe format. 
params = urllib.urlencode([ #('js_code',", "} conn = httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile', params, headers) response =", "= { \"Content-type\": \"application/x-www-form-urlencoded\" } conn = httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile',", "params, headers) response = conn.getresponse() data = response.read() print data", "the Content-type header. headers = { \"Content-type\": \"application/x-www-form-urlencoded\" } conn", "the POST request and encode them in # a URL-safe", "value for the Content-type header. headers = { \"Content-type\": \"application/x-www-form-urlencoded\"", "headers) response = conn.getresponse() data = response.read() print data conn.close()", "'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level', 'ADVANCED_OPTIMIZATIONS'), ('output_format', 'text'), ('output_info', 'warnings'), ]) # Always", "the parameters for the POST request and encode them in", "#('js_code', sys.argv[1]), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level', 'ADVANCED_OPTIMIZATIONS'),", "]) # Always use the following value for the Content-type", "httplib, urllib, sys # Define the parameters for the POST" ]
[ "def testMarkUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3') with self.test_session():", "_): in_this_function() error.assert_called() msg = '\\n'.join(error.call_args[0]) self.assertIn('Object was never used',", "self.assertFalse(gc.garbage) def testShouldUseResultWhenNotReallyUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3') with", "2.0 (the \"License\"); # you may not use this file", "under the License. # ============================================================================== \"\"\"Unit tests for tf_should_use.\"\"\" #", "'\\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah2:0', msg) self.assertIn('return_const', msg)", "the API. def testMarkUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3')", "del h with reroute_error() as (_, fatal): in_this_function() fatal.assert_called() msg", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
#", "disable=unused-import from __future__ import absolute_import from __future__ import division from", "= '\\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah2:0', msg) self.assertIn('return_const',", "= h.name self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name') gc.collect() self.assertFalse(gc.garbage) def testShouldUseResult(self): @tf_should_use.should_use_result def", "testAddShouldUseWarningWhenNotUsed(self): c = constant_op.constant(0, name='blah0') def in_this_function(): h = tf_should_use._add_should_use_warning(c)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "self._testAddShouldUseWarningWhenUsed(add, name='blah_add') gc.collect() self.assertFalse(gc.garbage) def testAddShouldUseWarningWhenUsedWithGetName(self): def get_name(h): _ =", "def testAddShouldUseWarningWhenUsedWithGetName(self): def get_name(h): _ = h.name self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name') gc.collect()", "error.assert_not_called() fatal.assert_not_called() def testAddShouldUseWarningWhenUsedWithAdd(self): def add(h): _ = h +", "def testAddShouldUseWarningWhenUsedWithAdd(self): def add(h): _ = h + 1 self._testAddShouldUseWarningWhenUsed(add,", "use this file except in compliance with the License. #", "permissions and # limitations under the License. 
# ============================================================================== \"\"\"Unit", "add(h): _ = h + 1 self._testAddShouldUseWarningWhenUsed(add, name='blah_add') gc.collect() self.assertFalse(gc.garbage)", "tf_should_use._add_should_use_warning(c) fn(h) del h error.assert_not_called() fatal.assert_not_called() def testAddShouldUseWarningWhenUsedWithAdd(self): def add(h):", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "h + 1 self._testAddShouldUseWarningWhenUsed(add, name='blah_add') gc.collect() self.assertFalse(gc.garbage) def testAddShouldUseWarningWhenUsedWithGetName(self): def", "tf_should_use.\"\"\" # pylint: disable=unused-import from __future__ import absolute_import from __future__", "License. # You may obtain a copy of the License", "reroute_error(): \"\"\"Temporarily reroute errors written to tf_logging.error into `captured`.\"\"\" with", "error: with test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal: yield error, fatal class", "The TensorFlow Authors. All Rights Reserved. # # Licensed under", "h with reroute_error() as (_, fatal): in_this_function() fatal.assert_called() msg =", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "1 self._testAddShouldUseWarningWhenUsed(add, name='blah_add') gc.collect() self.assertFalse(gc.garbage) def testAddShouldUseWarningWhenUsedWithGetName(self): def get_name(h): _", "============================================================================== \"\"\"Unit tests for tf_should_use.\"\"\" # pylint: disable=unused-import from __future__", "with reroute_error() as (_, fatal): in_this_function() fatal.assert_called() msg = '\\n'.join(fatal.call_args[0])", "= '\\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah3:0', msg) self.assertIn('return_const',", "Reserved. # # Licensed under the Apache License, Version 2.0", "the License. 
# ============================================================================== \"\"\"Unit tests for tf_should_use.\"\"\" # pylint:", "governing permissions and # limitations under the License. # ==============================================================================", "never used', msg) self.assertIn('blah2:0', msg) self.assertIn('return_const', msg) gc.collect() self.assertFalse(gc.garbage) def", "from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.python.util", "TensorFlow Authors. All Rights Reserved. # # Licensed under the", "msg) self.assertFalse(gc.garbage) def testAddShouldUseFatalWhenNotUsed(self): c = constant_op.constant(0, name='blah0') def in_this_function():", "used', msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg) self.assertFalse(gc.garbage) def _testAddShouldUseWarningWhenUsed(self, fn,", "with reroute_error() as (error, _): with self.test_session(): return_const(0.0) # Creating", "tf_should_use._add_should_use_warning(c, fatal_error=True) del h with reroute_error() as (_, fatal): in_this_function()", "name='meh') v.eval() error.assert_called() msg = '\\n'.join(error.call_args[0]) self.assertIn('Object was never used',", "gc.collect() self.assertFalse(gc.garbage) def testShouldUseResultWhenNotReallyUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3')", "from __future__ import absolute_import from __future__ import division from __future__", "in compliance with the License. 
# You may obtain a", "testAddShouldUseWarningWhenUsedWithGetName(self): def get_name(h): _ = h.name self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name') gc.collect() self.assertFalse(gc.garbage)", "software # distributed under the License is distributed on an", "division from __future__ import print_function import contextlib import gc import", "reroute_error() as (error, _): return_const(0.0) error.assert_called() msg = '\\n'.join(error.call_args[0]) self.assertIn('Object", "import sys from tensorflow.python.framework import constant_op from tensorflow.python.platform import test", "constant_op.constant(value, name='blah3') with reroute_error() as (error, _): with self.test_session(): return_const(0.0)", "= constant_op.constant(0, name='blah0') def in_this_function(): h = tf_should_use._add_should_use_warning(c, fatal_error=True) del", "fatal.assert_not_called() def testAddShouldUseWarningWhenUsedWithAdd(self): def add(h): _ = h + 1", "self.assertFalse(gc.garbage) # Tests that mark_used is available in the API.", "def _testAddShouldUseWarningWhenUsed(self, fn, name): c = constant_op.constant(0, name=name) with reroute_error()", "import division from __future__ import print_function import contextlib import gc", "= '\\n'.join(fatal.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function',", "error.assert_called() msg = '\\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah2:0',", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= tf_should_use._add_should_use_warning(c) del h with reroute_error() as (error, _): in_this_function()", "from __future__ import print_function import contextlib import gc import sys", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "yield error, fatal class TfShouldUseTest(test.TestCase): def testAddShouldUseWarningWhenNotUsed(self): c = constant_op.constant(0,", "self.assertFalse(gc.garbage) def testAddShouldUseWarningWhenUsedWithGetName(self): def get_name(h): _ = h.name self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name')", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "testAddShouldUseFatalWhenNotUsed(self): c = constant_op.constant(0, name='blah0') def in_this_function(): h = tf_should_use._add_should_use_warning(c,", "to in writing, software # distributed under the License is", "def get_name(h): _ = h.name self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name') gc.collect() self.assertFalse(gc.garbage) def", "errors written to tf_logging.error into `captured`.\"\"\" with test.mock.patch.object(tf_should_use.tf_logging, 'error') as", "# See the License for the specific language governing permissions", "import print_function import contextlib import gc import sys from tensorflow.python.framework", "language governing permissions and # limitations under the License. #", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "fn(h) del h error.assert_not_called() fatal.assert_not_called() def testAddShouldUseWarningWhenUsedWithAdd(self): def add(h): _", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. # You may obtain a copy of", "reroute_error() as (_, fatal): in_this_function() fatal.assert_called() msg = '\\n'.join(fatal.call_args[0]) self.assertIn('Object", "op and executing it does not mark the # unused", "limitations under the License. 
# ============================================================================== \"\"\"Unit tests for tf_should_use.\"\"\"", "reroute errors written to tf_logging.error into `captured`.\"\"\" with test.mock.patch.object(tf_should_use.tf_logging, 'error')", "API. def testMarkUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3') with", "(error, _): return_const(0.0) error.assert_called() msg = '\\n'.join(error.call_args[0]) self.assertIn('Object was never", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "written to tf_logging.error into `captured`.\"\"\" with test.mock.patch.object(tf_should_use.tf_logging, 'error') as error:", "reroute_error() as (error, fatal): h = tf_should_use._add_should_use_warning(c) fn(h) del h", "distributed under the License is distributed on an \"AS IS\"", "to tf_logging.error into `captured`.\"\"\" with test.mock.patch.object(tf_should_use.tf_logging, 'error') as error: with", "test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal: yield error, fatal class TfShouldUseTest(test.TestCase): def", "import test from tensorflow.python.platform import tf_logging from tensorflow.python.util import tf_should_use", "was never used', msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg) self.assertFalse(gc.garbage) def", "express or implied. # See the License for the specific", "except in compliance with the License. 
# You may obtain", "fatal_error=True) del h with reroute_error() as (_, fatal): in_this_function() fatal.assert_called()", "name='blah_add') gc.collect() self.assertFalse(gc.garbage) def testAddShouldUseWarningWhenUsedWithGetName(self): def get_name(h): _ = h.name", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "h = tf_should_use._add_should_use_warning(c) del h with reroute_error() as (error, _):", "used', msg) self.assertIn('blah3:0', msg) self.assertIn('return_const', msg) gc.collect() self.assertFalse(gc.garbage) # Tests", "as (error, _): return_const(0.0) error.assert_called() msg = '\\n'.join(error.call_args[0]) self.assertIn('Object was", "tf_should_use @contextlib.contextmanager def reroute_error(): \"\"\"Temporarily reroute errors written to tf_logging.error", "with test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal: yield error, fatal class TfShouldUseTest(test.TestCase):", "def testAddShouldUseFatalWhenNotUsed(self): c = constant_op.constant(0, name='blah0') def in_this_function(): h =", "msg) gc.collect() self.assertFalse(gc.garbage) def testShouldUseResultWhenNotReallyUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value,", "writing, software # distributed under the License is distributed on", "_ = h + 1 self._testAddShouldUseWarningWhenUsed(add, name='blah_add') gc.collect() self.assertFalse(gc.garbage) def", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "self.assertFalse(gc.garbage) def testAddShouldUseFatalWhenNotUsed(self): c = constant_op.constant(0, name='blah0') def in_this_function(): h", "@tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3') with self.test_session(): return_const(0.0).mark_used() if", "c = constant_op.constant(0, name=name) with reroute_error() as 
class TfShouldUseTest(test.TestCase):
  """Exercises tf_should_use's unused-object reporting and `mark_used`."""

  def testAddShouldUseWarningWhenNotUsed(self):
    """Dropping a wrapped tensor without touching it logs an error."""
    const = constant_op.constant(0, name='blah0')
    def in_this_function():
      handle = tf_should_use._add_should_use_warning(const)
      del handle
    with reroute_error() as (error, _):
      in_this_function()
    error.assert_called()
    msg = '\n'.join(error.call_args[0])
    # The report should identify both the tensor and the offending frame.
    self.assertIn('Object was never used', msg)
    self.assertIn('blah0:0', msg)
    self.assertIn('in_this_function', msg)
    self.assertFalse(gc.garbage)

  def testAddShouldUseFatalWhenNotUsed(self):
    """With fatal_error=True the report goes to tf_logging.fatal instead."""
    const = constant_op.constant(0, name='blah0')
    def in_this_function():
      handle = tf_should_use._add_should_use_warning(const, fatal_error=True)
      del handle
    with reroute_error() as (_, fatal):
      in_this_function()
    fatal.assert_called()
    msg = '\n'.join(fatal.call_args[0])
    self.assertIn('Object was never used', msg)
    self.assertIn('blah0:0', msg)
    self.assertIn('in_this_function', msg)
    self.assertFalse(gc.garbage)

  def _testAddShouldUseWarningWhenUsed(self, fn, name):
    """Applies `fn` to a wrapped tensor and checks that nothing is logged."""
    const = constant_op.constant(0, name=name)
    with reroute_error() as (error, fatal):
      handle = tf_should_use._add_should_use_warning(const)
      fn(handle)
      del handle
    error.assert_not_called()
    fatal.assert_not_called()

  def testAddShouldUseWarningWhenUsedWithAdd(self):
    """Arithmetic on the wrapper counts as a use."""
    def add(h):
      _ = h + 1
    self._testAddShouldUseWarningWhenUsed(add, name='blah_add')
    gc.collect()
    self.assertFalse(gc.garbage)

  def testAddShouldUseWarningWhenUsedWithGetName(self):
    """Reading an attribute of the wrapper counts as a use."""
    def get_name(h):
      _ = h.name
    self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name')
    gc.collect()
    self.assertFalse(gc.garbage)

  def testShouldUseResult(self):
    """Discarding a decorated function's return value logs an error."""
    @tf_should_use.should_use_result
    def return_const(value):
      return constant_op.constant(value, name='blah2')
    with reroute_error() as (error, _):
      return_const(0.0)
    error.assert_called()
    msg = '\n'.join(error.call_args[0])
    self.assertIn('Object was never used', msg)
    self.assertIn('blah2:0', msg)
    self.assertIn('return_const', msg)
    gc.collect()
    self.assertFalse(gc.garbage)

  def testShouldUseResultWhenNotReallyUsed(self):
    """Running unrelated ops afterwards does not count as a use."""
    @tf_should_use.should_use_result
    def return_const(value):
      return constant_op.constant(value, name='blah3')
    with reroute_error() as (error, _), self.test_session():
      return_const(0.0)
      # Creating another op and executing it does not mark the
      # unused op as being "used".
      unrelated = constant_op.constant(1.0, name='meh')
      unrelated.eval()
    error.assert_called()
    msg = '\n'.join(error.call_args[0])
    self.assertIn('Object was never used', msg)
    self.assertIn('blah3:0', msg)
    self.assertIn('return_const', msg)
    gc.collect()
    self.assertFalse(gc.garbage)

  # Tests that mark_used is available in the API.
  def testMarkUsed(self):
    """Explicit mark_used() suppresses the unused-object report."""
    @tf_should_use.should_use_result
    def return_const(value):
      return constant_op.constant(value, name='blah3')
    with self.test_session():
      return_const(0.0).mark_used()
(error, fatal): h = tf_should_use._add_should_use_warning(c) fn(h) del h error.assert_not_called()", "(error, _): in_this_function() error.assert_called() msg = '\\n'.join(error.call_args[0]) self.assertIn('Object was never", "Tests that mark_used is available in the API. def testMarkUsed(self):", "as being \"used\". v = constant_op.constant(1.0, name='meh') v.eval() error.assert_called() msg", "import tf_should_use @contextlib.contextmanager def reroute_error(): \"\"\"Temporarily reroute errors written to", "License. # ============================================================================== \"\"\"Unit tests for tf_should_use.\"\"\" # pylint: disable=unused-import", "the specific language governing permissions and # limitations under the", "never used', msg) self.assertIn('blah3:0', msg) self.assertIn('return_const', msg) gc.collect() self.assertFalse(gc.garbage) #", "was never used', msg) self.assertIn('blah3:0', msg) self.assertIn('return_const', msg) gc.collect() self.assertFalse(gc.garbage)", "applicable law or agreed to in writing, software # distributed", "fn, name): c = constant_op.constant(0, name=name) with reroute_error() as (error,", "error, fatal class TfShouldUseTest(test.TestCase): def testAddShouldUseWarningWhenNotUsed(self): c = constant_op.constant(0, name='blah0')", "testShouldUseResultWhenNotReallyUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3') with reroute_error() as", "op as being \"used\". v = constant_op.constant(1.0, name='meh') v.eval() error.assert_called()", "name='blah0') def in_this_function(): h = tf_should_use._add_should_use_warning(c) del h with reroute_error()", "that mark_used is available in the API. 
def testMarkUsed(self): @tf_should_use.should_use_result", "in writing, software # distributed under the License is distributed", "= tf_should_use._add_should_use_warning(c, fatal_error=True) del h with reroute_error() as (_, fatal):", "_testAddShouldUseWarningWhenUsed(self, fn, name): c = constant_op.constant(0, name=name) with reroute_error() as", "self.assertIn('return_const', msg) gc.collect() self.assertFalse(gc.garbage) # Tests that mark_used is available", "name='blah0') def in_this_function(): h = tf_should_use._add_should_use_warning(c, fatal_error=True) del h with", "name='blah3') with reroute_error() as (error, _): with self.test_session(): return_const(0.0) #", "constant_op.constant(0, name='blah0') def in_this_function(): h = tf_should_use._add_should_use_warning(c, fatal_error=True) del h", "pylint: disable=unused-import from __future__ import absolute_import from __future__ import division", "'\\n'.join(fatal.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg)", "+ 1 self._testAddShouldUseWarningWhenUsed(add, name='blah_add') gc.collect() self.assertFalse(gc.garbage) def testAddShouldUseWarningWhenUsedWithGetName(self): def get_name(h):", "(error, fatal): h = tf_should_use._add_should_use_warning(c) fn(h) del h error.assert_not_called() fatal.assert_not_called()", "msg = '\\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah2:0', msg)", "def testShouldUseResultWhenNotReallyUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3') with reroute_error()", "import tf_logging from tensorflow.python.util import tf_should_use @contextlib.contextmanager def reroute_error(): \"\"\"Temporarily", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "h with reroute_error() as (error, _): in_this_function() error.assert_called() msg =", "License, Version 2.0 (the \"License\"); # you 
may not use", "h = tf_should_use._add_should_use_warning(c) fn(h) del h error.assert_not_called() fatal.assert_not_called() def testAddShouldUseWarningWhenUsedWithAdd(self):", "= h + 1 self._testAddShouldUseWarningWhenUsed(add, name='blah_add') gc.collect() self.assertFalse(gc.garbage) def testAddShouldUseWarningWhenUsedWithGetName(self):", "# You may obtain a copy of the License at", "h error.assert_not_called() fatal.assert_not_called() def testAddShouldUseWarningWhenUsedWithAdd(self): def add(h): _ = h", "tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.python.util import", "msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg) self.assertFalse(gc.garbage) def _testAddShouldUseWarningWhenUsed(self, fn, name):", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "Authors. All Rights Reserved. # # Licensed under the Apache", "# Tests that mark_used is available in the API. def", "from __future__ import division from __future__ import print_function import contextlib", "# limitations under the License. # ============================================================================== \"\"\"Unit tests for", "self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg) self.assertFalse(gc.garbage) def _testAddShouldUseWarningWhenUsed(self, fn, name): c", "test.mock.patch.object(tf_should_use.tf_logging, 'error') as error: with test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal: yield", "'fatal') as fatal: yield error, fatal class TfShouldUseTest(test.TestCase): def testAddShouldUseWarningWhenNotUsed(self):", "the License for the specific language governing permissions and #", "never used', msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg) self.assertFalse(gc.garbage) def testAddShouldUseFatalWhenNotUsed(self):", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. 
# See the License for the", "Copyright 2017 The TensorFlow Authors. All Rights Reserved. # #", "fatal): h = tf_should_use._add_should_use_warning(c) fn(h) del h error.assert_not_called() fatal.assert_not_called() def", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg) self.assertFalse(gc.garbage) def testAddShouldUseFatalWhenNotUsed(self): c = constant_op.constant(0,", "name='blah_get_name') gc.collect() self.assertFalse(gc.garbage) def testShouldUseResult(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value,", "return_const(0.0) # Creating another op and executing it does not", "tests for tf_should_use.\"\"\" # pylint: disable=unused-import from __future__ import absolute_import", "tensorflow.python.framework import constant_op from tensorflow.python.platform import test from tensorflow.python.platform import", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "constant_op.constant(0, name=name) with reroute_error() as (error, fatal): h = tf_should_use._add_should_use_warning(c)", "'\\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg)", "_ = h.name self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name') gc.collect() self.assertFalse(gc.garbage) def testShouldUseResult(self): @tf_should_use.should_use_result", "tf_logging.error into `captured`.\"\"\" with test.mock.patch.object(tf_should_use.tf_logging, 'error') as error: with test.mock.patch.object(tf_should_use.tf_logging,", "absolute_import from __future__ import division from __future__ import print_function import", "msg) self.assertIn('blah2:0', msg) self.assertIn('return_const', msg) gc.collect() self.assertFalse(gc.garbage) def testShouldUseResultWhenNotReallyUsed(self): @tf_should_use.should_use_result", "\"License\"); # you 
may not use this file except in", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "\"\"\"Temporarily reroute errors written to tf_logging.error into `captured`.\"\"\" with test.mock.patch.object(tf_should_use.tf_logging,", "with test.mock.patch.object(tf_should_use.tf_logging, 'error') as error: with test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal:", "gc.collect() self.assertFalse(gc.garbage) def testAddShouldUseWarningWhenUsedWithGetName(self): def get_name(h): _ = h.name self._testAddShouldUseWarningWhenUsed(get_name,", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "constant_op.constant(value, name='blah3') with self.test_session(): return_const(0.0).mark_used() if __name__ == '__main__': test.main()", "error.assert_called() msg = '\\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah0:0',", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "_): with self.test_session(): return_const(0.0) # Creating another op and executing", "msg) self.assertIn('blah3:0', msg) self.assertIn('return_const', msg) gc.collect() self.assertFalse(gc.garbage) # Tests that", "You may obtain a copy of the License at #", "with reroute_error() as (error, fatal): h = tf_should_use._add_should_use_warning(c) fn(h) del", "msg = '\\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah3:0', msg)", "name): c = constant_op.constant(0, name=name) with reroute_error() as (error, fatal):", "constant_op.constant(0, name='blah0') def in_this_function(): h = tf_should_use._add_should_use_warning(c) del h with", "the Apache License, Version 2.0 (the \"License\"); # you may", "never used', msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg) self.assertFalse(gc.garbage) def _testAddShouldUseWarningWhenUsed(self,", "in_this_function() fatal.assert_called() msg = '\\n'.join(fatal.call_args[0]) 
self.assertIn('Object was never used', msg)", "return constant_op.constant(value, name='blah2') with reroute_error() as (error, _): return_const(0.0) error.assert_called()", "with self.test_session(): return_const(0.0) # Creating another op and executing it", "mark the # unused op as being \"used\". v =", "constant_op from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from" ]
[ "This returns an indented list of properties separated with newlines:", "Path to the JDK. version: Version of the JDK. runtime_name:", "runtime_name, java_home = java_home, visibility = visibility, ) native.config_setting( name", "version: optionally java version \"\"\" _local_java_repository_rule(name = name, java_home =", "runtime_name == None: runtime_name = name native.java_runtime( name = runtime_name,", "if property.startswith(\"java.version = \")] if len(version_property) != 1: return None", "2.0 (the \"License\"); # you may not use this file", "{java_binary} in {java_home}; either correct your JAVA_HOME, \" + \"PATH", "repository_rule( implementation = _local_java_repository_impl, local = True, configure = True,", "\"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = runtime_name, ) if version == \"8\": default_java_toolchain(", "minor = parts[1] return minor return major def local_java_runtime(name, java_home,", "constrained by flag --java_runtime_version having value set to either name", "\"localjdk_setting\", values = {{\"java_runtime_version\": \"{local_jdk}\"}}, visibility = [\"//visibility:private\"], ) toolchain(", "attr.string(), \"build_file\": attr.label(), }, ) def local_java_repository(name, java_home, version =", "[\"//visibility:private\"], ) toolchain( name = \"runtime_toolchain_definition\", target_settings = [\":localjdk_setting\"], toolchain_type", "\"virtual\" targets are created, which fail only when actually needed.", "None, visibility = [\"//visibility:public\"]): \"\"\"Defines a java_runtime target together with", "= \".exe\" if repository_ctx.os.name.lower().find(\"windows\") != -1 else \"\" java_bin =", "version: Version of the JDK. 
runtime_name: name of java_runtime target", "\"BUILD.bazel\", _NOJDK_BUILD_TPL.format( local_jdk = repository_ctx.name, java_binary = \"bin/java\" + extension,", "\"_name_version_setting\", values = {\"java_runtime_version\": name + \"_\" + version}, visibility", "= parts[1] return minor return major def local_java_runtime(name, java_home, version,", "\"\"\" if runtime_name == None: runtime_name = name native.java_runtime( name", "str(version), java_runtime = runtime_name, ) # else version is not", "fail('The path indicated by the \"java_home\" attribute \"%s\" (absolute: \"%s\")", "java_home, version = \"\", build_file = None): \"\"\"Registers a runtime", "are created for --java_language_version flags values between 8 and version", ") native.config_setting( name = name + \"_name_setting\", values = {\"java_runtime_version\":", "name}, visibility = [\"//visibility:private\"], ) native.config_setting( name = name +", "_local_java_repository_rule(name = name, java_home = java_home, version = version, build_file", "created for --java_language_version flags values between 8 and version (inclusive).", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "compile toolchain definitions. Java runtime toolchain is constrained by flag", "major def local_java_runtime(name, java_home, version, runtime_name = None, visibility =", "values = {\"java_runtime_version\": name + \"_\" + version}, visibility =", "target if it already exists. visibility: Visibility that will be", "= \"jdk\", header = \"Auto-Configuration Error:\", message = (\"Cannot find", "repository (e.g. \" + \"--java_runtime_version=remotejdk_11\") ) config_setting( name = \"localjdk_setting\",", "name = name + \"_toolchain_java8\", configuration = JVM8_TOOLCHAIN_CONFIGURATION, source_version =", "runtime and compile toolchain definitions. 
Java runtime toolchain is constrained", "name + \"_toolchain_java\" + str(version), source_version = str(version), target_version =", "build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name = '\"jdk\"' if repository_ctx.attr.build_file else None", "range(8, int(version) + 1): default_java_toolchain( name = name + \"_toolchain_java\"", "configuration = JVM8_TOOLCHAIN_CONFIGURATION, source_version = version, target_version = version, java_runtime", "= {{\"java_runtime_version\": \"{local_jdk}\"}}, visibility = [\"//visibility:private\"], ) toolchain( name =", "--java_runtime_version flag having value of the \"name\" or \"version\" parameter.", "values = {\"java_runtime_version\": version}, visibility = [\"//visibility:private\"], ) native.config_setting( name", "\"jdk\", header = \"Auto-Configuration Error:\", message = (\"Cannot find Java", "unregistered compile toolchain. Toolchain resolution is constrained with --java_runtime_version flag", "creates an unregistered compile toolchain. Toolchain resolution is constrained with", "of properties separated with newlines: # \" java.vendor.url.bug = ...", "than the newer versions. Args: name: name of the target.", "strip_properties if property.startswith(\"java.version = \")] if len(version_property) != 1: return", "for file in repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file, file.basename) # Build file template,", "\"name\" or \"version\" parameter. Java compile toolchains are created for", "repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file, file.basename) # Build file template, when JDK does", "\"1\": # handles versions below 1.8 minor = parts[1] return", "name + \"_version_setting\", values = {\"java_runtime_version\": version}, visibility = [\"//visibility:private\"],", "use this file except in compliance with the License. 
#", "java_runtime = runtime_name, ) # else version is not recognized", "local_java_runtime_macro, ) # Symlink all files for file in repository_ctx.path(java_home).readdir():", "header = \"Auto-Configuration Error:\", message = (\"Cannot find Java binary", "by the \"java_home\" attribute \"%s\" (absolute: \"%s\") ' + \"does", "str(version), source_version = str(version), target_version = str(version), java_runtime = runtime_name,", "reserved. # # Licensed under the Apache License, Version 2.0", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "for compilation. This requires a different configuration for JDK8 than", "= runtime_name, ) # else version is not recognized and", "local_java_runtime(name, java_home, version, runtime_name = None, visibility = [\"//visibility:public\"]): \"\"\"Defines", "file for local_java_repository\\n\" + \"workspace(name = \\\"{name}\\\")\\n\".format(name = repository_ctx.name), )", "License. # You may obtain a copy of the License", "all files for file in repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file, file.basename) # Build", "JAVA_HOME, \" + \"PATH or specify Java from remote repository", "java_bin.exists: # Java binary does not exist repository_ctx.file( \"BUILD.bazel\", _NOJDK_BUILD_TPL.format(", "under the License is distributed on an \"AS IS\" BASIS,", "+ \"_toolchain_java8\", configuration = JVM8_TOOLCHAIN_CONFIGURATION, source_version = version, target_version =", "License for the specific language governing permissions and # limitations", "build_file + local_java_runtime_macro, ) # Symlink all files for file", "# else version is not recognized and no compilation toolchains", "parts[0] if len(parts) == 1: return major elif major ==", "''' _local_java_repository_rule = repository_rule( implementation = _local_java_repository_impl, local = True,", "# Copyright 2020 The Bazel Authors. All rights reserved. 
#", "\" java.version.date = 2020-11-05\\\" strip_properties = [property.strip() for property in", "# \" java.version.date = 2020-11-05\\\" strip_properties = [property.strip() for property", "context \"\"\" java_home = repository_ctx.attr.java_home java_home_path = repository_ctx.path(java_home) if not", "visibility = [\"//visibility:private\"], ) native.config_setting( name = name + \"_version_setting\",", "version version = repository_ctx.attr.version if repository_ctx.attr.version != \"\" else _detect_java_version(repository_ctx,", "exist repository_ctx.file( \"BUILD.bazel\", _NOJDK_BUILD_TPL.format( local_jdk = repository_ctx.name, java_binary = \"bin/java\"", "name of java_runtime target if it already exists. visibility: Visibility", "\"default_java_toolchain\") def _detect_java_version(repository_ctx, java_bin): properties_out = repository_ctx.execute([java_bin, \"-XshowSettings:properties\"]).stderr # This", "parameter. Java compile toolchains are created for --java_language_version flags values", "newlines: # \" java.vendor.url.bug = ... \\n\" # \" java.version", "\"JVM8_TOOLCHAIN_CONFIGURATION\", \"default_java_toolchain\") def _detect_java_version(repository_ctx, java_bin): properties_out = repository_ctx.execute([java_bin, \"-XshowSettings:properties\"]).stderr #", "property in properties_out.splitlines()] version_property = [property for property in strip_properties", "JDK8 than the newer versions. Args: name: name of the", "2020 The Bazel Authors. All rights reserved. # # Licensed", "else version is not recognized and no compilation toolchains are", "= str(version), target_version = str(version), java_runtime = runtime_name, ) #", "in compliance with the License. # You may obtain a", "name = name + \"_toolchain_java\" + str(version), source_version = str(version),", "for compilation. 
If there is no JDK \"virtual\" targets are", "software # distributed under the License is distributed on an", "type(\"\") and version.isdigit() and int(version) > 8: for version in", "+ \"_name_version_setting\", values = {\"java_runtime_version\": name + \"_\" + version},", "= {\"java_runtime_version\": name}, visibility = [\"//visibility:private\"], ) native.config_setting( name =", "compile toolchains are created for --java_language_version flags values between 8", "actual = select({ name + \"_name_setting\": name + \"_name_setting\", name", "local_java_repository\\n\" + \"workspace(name = \\\"{name}\\\")\\n\".format(name = repository_ctx.name), ) extension =", "None: build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name = '\"jdk\"' if repository_ctx.attr.build_file else", "when actually needed. Args: name: A unique name for this", "select({ name + \"_name_setting\": name + \"_name_setting\", name + \"_version_setting\":", "name = \"runtime_toolchain_definition\", target_settings = [\":%s_settings_alias\" % name], toolchain_type =", "= [\":%s_settings_alias\" % name], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = runtime_name,", "\\n\" # \" java.version = 11.0.8\\n\" # \" java.version.date =", "find Java binary {java_binary} in {java_home}; either correct your JAVA_HOME,", "= \"):] parts = version_value.split(\".\") major = parts[0] if len(parts)", "target. java_home: Path to the JDK. version: Version of the", "local JDK and creates an unregistered compile toolchain. 
Toolchain resolution", "-1 else \"\" java_bin = java_home_path.get_child(\"bin\").get_child(\"java\" + extension) if not", "using \"local_java_runtime\" macro build_file = \"\" if repository_ctx.attr.build_file != None:", "java_runtime target together with Java runtime and compile toolchain definitions.", "\"java_home\" attribute \"%s\" (absolute: \"%s\") ' + \"does not exist.\"", "does not exist repository_ctx.file( \"BUILD.bazel\", _NOJDK_BUILD_TPL.format( local_jdk = repository_ctx.name, java_binary", "if runtime_name == None: runtime_name = name native.java_runtime( name =", "extension, java_home = java_home, ), False, ) return # Detect", "version) repository_ctx.file( \"BUILD.bazel\", 'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\", \"local_java_runtime\")\\n' + build_file + local_java_runtime_macro, )", "property.startswith(\"java.version = \")] if len(version_property) != 1: return None version_value", "= version, java_runtime = runtime_name, ) elif type(version) == type(\"\")", "or specify Java from remote repository (e.g. \" + \"--java_runtime_version=remotejdk_11\")", "JDK \"virtual\" targets are created, which fail only when actually", "# Detect version version = repository_ctx.attr.version if repository_ctx.attr.version != \"\"", "+ \"_toolchain_java\" + str(version), source_version = str(version), target_version = str(version),", "versions. Args: name: name of the target. java_home: Path to", ") return # Detect version version = repository_ctx.attr.version if repository_ctx.attr.version", "version, java_runtime = runtime_name, ) elif type(version) == type(\"\") and", "java_home, version, runtime_name = None, visibility = [\"//visibility:public\"]): \"\"\"Defines a", "JDK. version: Version of the JDK. 
runtime_name: name of java_runtime", "\"local_java_runtime\")\\n' + build_file + local_java_runtime_macro, ) # Symlink all files", "\"java_home\": attr.string(), \"version\": attr.string(), \"build_file\": attr.label(), }, ) def local_java_repository(name,", "str(version), target_version = str(version), java_runtime = runtime_name, ) # else", "and no compilation toolchains are predefined def _local_java_repository_impl(repository_ctx): \"\"\"Repository rule", "in strip_properties if property.startswith(\"java.version = \")] if len(version_property) != 1:", "# Symlink all files for file in repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file, file.basename)", "+ local_java_runtime_macro, ) # Symlink all files for file in", "a java_runtime target together with Java runtime and compile toolchain", "compilation. This requires a different configuration for JDK8 than the", "= java_home, visibility = visibility, ) native.config_setting( name = name", "\"\" if repository_ctx.attr.build_file != None: build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name =", "local_java_repository(name, java_home, version = \"\", build_file = None): \"\"\"Registers a", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "definitions. Java runtime toolchain is constrained by flag --java_runtime_version having", "1): default_java_toolchain( name = name + \"_toolchain_java\" + str(version), source_version", "runtime_name = %s, java_home = \"%s\", version = \"%s\", )", "[property for property in strip_properties if property.startswith(\"java.version = \")] if", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "property in strip_properties if property.startswith(\"java.version = \")] if len(version_property) !=", "configure = True, attrs = { \"java_home\": attr.string(), \"version\": attr.string(),", "= None, visibility = [\"//visibility:public\"]): \"\"\"Defines a java_runtime target together", "\"_version_setting\", values = {\"java_runtime_version\": version}, visibility = [\"//visibility:private\"], ) native.config_setting(", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "not exist repository_ctx.file( \"BUILD.bazel\", _NOJDK_BUILD_TPL.format( local_jdk = repository_ctx.name, java_binary =", "to in writing, software # distributed under the License is", "= select({ name + \"_name_setting\": name + \"_name_setting\", name +", "and compile toolchain definitions. Java runtime toolchain is constrained by", "version == \"8\": default_java_toolchain( name = name + \"_toolchain_java8\", configuration", "minor return major def local_java_runtime(name, java_home, version, runtime_name = None,", "to the JDK. version: Version of the JDK. 
runtime_name: name", "# See the License for the specific language governing permissions", "runtime_name = None, visibility = [\"//visibility:public\"]): \"\"\"Defines a java_runtime target", "== type(\"\") and version.isdigit() and int(version) > 8: for version", "'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\", \"local_java_runtime\")\\n' + build_file + local_java_runtime_macro, ) # Symlink all", "if repository_ctx.attr.build_file else None local_java_runtime_macro = \"\"\" local_java_runtime( name =", "[\":%s_settings_alias\" % name], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = runtime_name, )", "or agreed to in writing, software # distributed under the", "java_home = java_home, version = version, build_file = build_file) native.register_toolchains(\"@\"", "{java_home}; either correct your JAVA_HOME, \" + \"PATH or specify", "java.vendor.url.bug = ... \\n\" # \" java.version = 11.0.8\\n\" #", "required by applicable law or agreed to in writing, software", "visibility = [\"//visibility:private\"], ) native.config_setting( name = name + \"_name_version_setting\",", "\"//conditions:default\": name + \"_name_version_setting\", }), visibility = [\"//visibility:private\"], ) native.toolchain(", "Java binary does not exist repository_ctx.file( \"BUILD.bazel\", _NOJDK_BUILD_TPL.format( local_jdk =", "flag having value of the \"name\" or \"version\" parameter. Java", "int(version) > 8: for version in range(8, int(version) + 1):", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "path indicated by the \"java_home\" attribute \"%s\" (absolute: \"%s\") '", "None: runtime_name = name native.java_runtime( name = runtime_name, java_home =", "with the License. # You may obtain a copy of", "{\"java_runtime_version\": name}, visibility = [\"//visibility:private\"], ) native.config_setting( name = name", "implementation. 
Args: repository_ctx: repository context \"\"\" java_home = repository_ctx.attr.java_home java_home_path", "JDK. runtime_name: name of java_runtime target if it already exists.", "\"BUILD.bazel\", 'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\", \"local_java_runtime\")\\n' + build_file + local_java_runtime_macro, ) # Symlink", "if repository_ctx.attr.version != \"\" else _detect_java_version(repository_ctx, java_bin) # Prepare BUILD", "file template version: optionally java version \"\"\" _local_java_repository_rule(name = name,", "exists. visibility: Visibility that will be applied to the java", "and # limitations under the License. \"\"\"Rules for importing and", "{\"java_runtime_version\": version}, visibility = [\"//visibility:private\"], ) native.config_setting( name = name", "not java_home_path.exists: fail('The path indicated by the \"java_home\" attribute \"%s\"", "[\"//visibility:private\"], ) native.alias( name = name + \"_settings_alias\", actual =", "+ \"_name_setting\", values = {\"java_runtime_version\": name}, visibility = [\"//visibility:private\"], )", "= \":jdk\", ) ''' _local_java_repository_rule = repository_rule( implementation = _local_java_repository_impl,", "def local_java_runtime(name, java_home, version, runtime_name = None, visibility = [\"//visibility:public\"]):", "use the same (local) JDK for compilation. This requires a", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "target_settings = [\":localjdk_setting\"], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = \":jdk\", )", "which fail only when actually needed. 
Args: name: A unique", "= 2020-11-05\\\" strip_properties = [property.strip() for property in properties_out.splitlines()] version_property", "default_java_toolchain( name = name + \"_toolchain_java\" + str(version), source_version =", "version is not recognized and no compilation toolchains are predefined", "{{\"java_runtime_version\": \"{local_jdk}\"}}, visibility = [\"//visibility:private\"], ) toolchain( name = \"runtime_toolchain_definition\",", "False, ) return # Detect version version = repository_ctx.attr.version if", "distributed under the License is distributed on an \"AS IS\"", "[\":localjdk_setting\"], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = \":jdk\", ) ''' _local_java_repository_rule", "+ \"_name_setting\", name + \"_version_setting\": name + \"_version_setting\", \"//conditions:default\": name", "\"\" java_bin = java_home_path.get_child(\"bin\").get_child(\"java\" + extension) if not java_bin.exists: #", "= runtime_name, ) elif type(version) == type(\"\") and version.isdigit() and", "\"%s\", runtime_name = %s, java_home = \"%s\", version = \"%s\",", "_NOJDK_BUILD_TPL = '''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\", \"fail_rule\") fail_rule( name = \"jdk\", header =", "for --java_language_version flags values between 8 and version (inclusive). Java", "= JVM8_TOOLCHAIN_CONFIGURATION, source_version = version, target_version = version, java_runtime =", "= \"bin/java\" + extension, java_home = java_home, ), False, )", "= name native.java_runtime( name = runtime_name, java_home = java_home, visibility", "java_bin = java_home_path.get_child(\"bin\").get_child(\"java\" + extension) if not java_bin.exists: # Java", "% name], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = runtime_name, ) if", "express or implied. # See the License for the specific", "else None local_java_runtime_macro = \"\"\" local_java_runtime( name = \"%s\", runtime_name", "name: A unique name for this rule. 
java_home: Location of", "except in compliance with the License. # You may obtain", "\".exe\" if repository_ctx.os.name.lower().find(\"windows\") != -1 else \"\" java_bin = java_home_path.get_child(\"bin\").get_child(\"java\"", "version = \"%s\", ) \"\"\" % (repository_ctx.name, runtime_name, java_home, version)", "License. \"\"\"Rules for importing and registering a local JDK.\"\"\" load(\":default_java_toolchain.bzl\",", "returns an indented list of properties separated with newlines: #", "\"%s\", ) \"\"\" % (repository_ctx.name, runtime_name, java_home, version) repository_ctx.file( \"BUILD.bazel\",", "optionally BUILD file template version: optionally java version \"\"\" _local_java_repository_rule(name", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "= runtime_name, ) if version == \"8\": default_java_toolchain( name =", "needed. Args: name: A unique name for this rule. java_home:", "= (\"Cannot find Java binary {java_binary} in {java_home}; either correct", "in {java_home}; either correct your JAVA_HOME, \" + \"PATH or", "= [\"//visibility:private\"], ) toolchain( name = \"runtime_toolchain_definition\", target_settings = [\":localjdk_setting\"],", "writing, software # distributed under the License is distributed on", "load(\":default_java_toolchain.bzl\", \"JVM8_TOOLCHAIN_CONFIGURATION\", \"default_java_toolchain\") def _detect_java_version(repository_ctx, java_bin): properties_out = repository_ctx.execute([java_bin, \"-XshowSettings:properties\"]).stderr", "visibility = visibility, ) native.config_setting( name = name + \"_name_setting\",", "you may not use this file except in compliance with", "not exist.\" % (java_home, str(java_home_path))) repository_ctx.file( \"WORKSPACE\", \"# DO NOT", ") native.config_setting( name = name + \"_name_version_setting\", values = {\"java_runtime_version\":", "# Licensed under the Apache License, Version 2.0 (the \"License\");", 
"'\"jdk\"' if repository_ctx.attr.build_file else None local_java_runtime_macro = \"\"\" local_java_runtime( name", "major == \"1\": # handles versions below 1.8 minor =", "compilation toolchains are predefined def _local_java_repository_impl(repository_ctx): \"\"\"Repository rule local_java_repository implementation.", "for local_java_repository\\n\" + \"workspace(name = \\\"{name}\\\")\\n\".format(name = repository_ctx.name), ) extension", ") # else version is not recognized and no compilation", "parts = version_value.split(\".\") major = parts[0] if len(parts) == 1:", "argument. Java compile toolchains are created for --java_language_version flags values", "runtime_name, java_home, version) repository_ctx.file( \"BUILD.bazel\", 'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\", \"local_java_runtime\")\\n' + build_file +", "runtime_name = name native.java_runtime( name = runtime_name, java_home = java_home,", "\"{local_jdk}\"}}, visibility = [\"//visibility:private\"], ) toolchain( name = \"runtime_toolchain_definition\", target_settings", "+ \"_name_version_setting\", }), visibility = [\"//visibility:private\"], ) native.toolchain( name =", "# \" java.vendor.url.bug = ... \\n\" # \" java.version =", "(inclusive). Java compile toolchains use the same (local) JDK for", "different configuration for JDK8 than the newer versions. Args: name:", "CONDITIONS OF ANY KIND, either express or implied. # See", "JDK for compilation. If there is no JDK \"virtual\" targets", "if len(version_property) != 1: return None version_value = version_property[0][len(\"java.version =", "compile toolchain. Toolchain resolution is constrained with --java_runtime_version flag having", "Prepare BUILD file using \"local_java_runtime\" macro build_file = \"\" if", "True, configure = True, attrs = { \"java_home\": attr.string(), \"version\":", "between 8 and version (inclusive). Java compile toolchains use the", "... 
\\n\" # \" java.version = 11.0.8\\n\" # \" java.version.date", "DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\\n\" +", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "'''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\", \"fail_rule\") fail_rule( name = \"jdk\", header = \"Auto-Configuration Error:\",", "A unique name for this rule. java_home: Location of the", "toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = \":jdk\", ) ''' _local_java_repository_rule =", "\"_toolchain_java8\", configuration = JVM8_TOOLCHAIN_CONFIGURATION, source_version = version, target_version = version,", "are created, which fail only when actually needed. Args: name:", "properties separated with newlines: # \" java.vendor.url.bug = ... \\n\"", "version, target_version = version, java_runtime = runtime_name, ) elif type(version)", "either name or version argument. Java compile toolchains are created", "toolchains are created for --java_language_version flags values between 8 and", "registering a local JDK.\"\"\" load(\":default_java_toolchain.bzl\", \"JVM8_TOOLCHAIN_CONFIGURATION\", \"default_java_toolchain\") def _detect_java_version(repository_ctx, java_bin):", "\"\", build_file = None): \"\"\"Registers a runtime toolchain for local", "\"):] parts = version_value.split(\".\") major = parts[0] if len(parts) ==", "runtime target \"\"\" if runtime_name == None: runtime_name = name", "repository_ctx.attr.build_file else None local_java_runtime_macro = \"\"\" local_java_runtime( name = \"%s\",", "major elif major == \"1\": # handles versions below 1.8", "JDK.\"\"\" load(\":default_java_toolchain.bzl\", \"JVM8_TOOLCHAIN_CONFIGURATION\", \"default_java_toolchain\") def _detect_java_version(repository_ctx, java_bin): properties_out = repository_ctx.execute([java_bin,", "= name, java_home = java_home, version = version, build_file =", "the JDK imported. 
build_file: optionally BUILD file template version: optionally", "= repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name = '\"jdk\"' if repository_ctx.attr.build_file else None local_java_runtime_macro", "+ \"--java_runtime_version=remotejdk_11\") ) config_setting( name = \"localjdk_setting\", values = {{\"java_runtime_version\":", "\"# DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\\n\"", "repository_ctx.attr.build_file != None: build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name = '\"jdk\"' if", "java_home = java_home, ), False, ) return # Detect version", "# handles versions below 1.8 minor = parts[1] return minor", "= version, target_version = version, java_runtime = runtime_name, ) elif", "toolchain definitions. Java runtime toolchain is constrained by flag --java_runtime_version", ") config_setting( name = \"localjdk_setting\", values = {{\"java_runtime_version\": \"{local_jdk}\"}}, visibility", "repository_ctx.attr.java_home java_home_path = repository_ctx.path(java_home) if not java_home_path.exists: fail('The path indicated", "file.basename) # Build file template, when JDK does not exist", "JDK does not exist _NOJDK_BUILD_TPL = '''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\", \"fail_rule\") fail_rule( name", "\" java.version = 11.0.8\\n\" # \" java.version.date = 2020-11-05\\\" strip_properties", "if it already exists. visibility: Visibility that will be applied", "extension = \".exe\" if repository_ctx.os.name.lower().find(\"windows\") != -1 else \"\" java_bin", "build_file = None): \"\"\"Registers a runtime toolchain for local JDK", ") native.alias( name = name + \"_settings_alias\", actual = select({", "java.version = 11.0.8\\n\" # \" java.version.date = 2020-11-05\\\" strip_properties =", "= \"%s\", runtime_name = %s, java_home = \"%s\", version =", "Copyright 2020 The Bazel Authors. All rights reserved. # #", "(local) JDK for compilation. 
If there is no JDK \"virtual\"", "= visibility, ) native.config_setting( name = name + \"_name_setting\", values", "OR CONDITIONS OF ANY KIND, either express or implied. #", "a local JDK.\"\"\" load(\":default_java_toolchain.bzl\", \"JVM8_TOOLCHAIN_CONFIGURATION\", \"default_java_toolchain\") def _detect_java_version(repository_ctx, java_bin): properties_out", "runtime_name: name of java_runtime target if it already exists. visibility:", "1: return None version_value = version_property[0][len(\"java.version = \"):] parts =", "toolchain is constrained by flag --java_runtime_version having value set to", "runtime_name, ) elif type(version) == type(\"\") and version.isdigit() and int(version)", "toolchain. Toolchain resolution is constrained with --java_runtime_version flag having value", "# Build file template, when JDK does not exist _NOJDK_BUILD_TPL", "[property.strip() for property in properties_out.splitlines()] version_property = [property for property", "name + \"_name_setting\", name + \"_version_setting\": name + \"_version_setting\", \"//conditions:default\":", "the License is distributed on an \"AS IS\" BASIS, #", "rule local_java_repository implementation. Args: repository_ctx: repository context \"\"\" java_home =", "strip_properties = [property.strip() for property in properties_out.splitlines()] version_property = [property", "!= 1: return None version_value = version_property[0][len(\"java.version = \"):] parts", "exist _NOJDK_BUILD_TPL = '''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\", \"fail_rule\") fail_rule( name = \"jdk\", header", "an unregistered compile toolchain. Toolchain resolution is constrained with --java_runtime_version", "Args: name: A unique name for this rule. java_home: Location", "java.version.date = 2020-11-05\\\" strip_properties = [property.strip() for property in properties_out.splitlines()]", "specify Java from remote repository (e.g. 
\" + \"--java_runtime_version=remotejdk_11\") )", "[\"//visibility:private\"], ) native.toolchain( name = \"runtime_toolchain_definition\", target_settings = [\":%s_settings_alias\" %", "Visibility that will be applied to the java runtime target", "list of properties separated with newlines: # \" java.vendor.url.bug =", "= _local_java_repository_impl, local = True, configure = True, attrs =", "), False, ) return # Detect version version = repository_ctx.attr.version", "= repository_rule( implementation = _local_java_repository_impl, local = True, configure =", "return minor return major def local_java_runtime(name, java_home, version, runtime_name =", "= java_home, ), False, ) return # Detect version version", "= name + \"_settings_alias\", actual = select({ name + \"_name_setting\":", "native.alias( name = name + \"_settings_alias\", actual = select({ name", "when JDK does not exist _NOJDK_BUILD_TPL = '''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\", \"fail_rule\") fail_rule(", "either correct your JAVA_HOME, \" + \"PATH or specify Java", "type(version) == type(\"\") and version.isdigit() and int(version) > 8: for", "(absolute: \"%s\") ' + \"does not exist.\" % (java_home, str(java_home_path)))", "= java_home, version = version, build_file = build_file) native.register_toolchains(\"@\" +", "for version in range(8, int(version) + 1): default_java_toolchain( name =", "flag --java_runtime_version having value set to either name or version", "local_java_runtime( name = \"%s\", runtime_name = %s, java_home = \"%s\",", "\"8\": default_java_toolchain( name = name + \"_toolchain_java8\", configuration = JVM8_TOOLCHAIN_CONFIGURATION,", "of java_runtime target if it already exists. 
visibility: Visibility that", "== \"8\": default_java_toolchain( name = name + \"_toolchain_java8\", configuration =", "does not exist _NOJDK_BUILD_TPL = '''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\", \"fail_rule\") fail_rule( name =", "target_version = str(version), java_runtime = runtime_name, ) # else version", "attrs = { \"java_home\": attr.string(), \"version\": attr.string(), \"build_file\": attr.label(), },", "law or agreed to in writing, software # distributed under", "in range(8, int(version) + 1): default_java_toolchain( name = name +", "= str(version), java_runtime = runtime_name, ) # else version is", "message = (\"Cannot find Java binary {java_binary} in {java_home}; either", "generated WORKSPACE file for local_java_repository\\n\" + \"workspace(name = \\\"{name}\\\")\\n\".format(name =", "= \"localjdk_setting\", values = {{\"java_runtime_version\": \"{local_jdk}\"}}, visibility = [\"//visibility:private\"], )", "version argument. Java compile toolchains are created for --java_language_version flags", "--java_runtime_version having value set to either name or version argument.", "\"_settings_alias\", actual = select({ name + \"_name_setting\": name + \"_name_setting\",", "and int(version) > 8: for version in range(8, int(version) +", "1: return major elif major == \"1\": # handles versions", "> 8: for version in range(8, int(version) + 1): default_java_toolchain(", "(java_home, str(java_home_path))) repository_ctx.file( \"WORKSPACE\", \"# DO NOT EDIT: automatically generated", "\"%s\", version = \"%s\", ) \"\"\" % (repository_ctx.name, runtime_name, java_home,", "value of the \"name\" or \"version\" parameter. Java compile toolchains", "unique name for this rule. 
java_home: Location of the JDK", ") native.config_setting( name = name + \"_version_setting\", values = {\"java_runtime_version\":", "\"_version_setting\", \"//conditions:default\": name + \"_name_version_setting\", }), visibility = [\"//visibility:private\"], )", "' + \"does not exist.\" % (java_home, str(java_home_path))) repository_ctx.file( \"WORKSPACE\",", "may obtain a copy of the License at # #", "implementation = _local_java_repository_impl, local = True, configure = True, attrs", "of the \"name\" or \"version\" parameter. Java compile toolchains are", "repository_ctx.symlink(file, file.basename) # Build file template, when JDK does not", "by flag --java_runtime_version having value set to either name or", "properties_out = repository_ctx.execute([java_bin, \"-XshowSettings:properties\"]).stderr # This returns an indented list", "name of the target. java_home: Path to the JDK. version:", "\"build_file\": attr.label(), }, ) def local_java_repository(name, java_home, version = \"\",", "repository_ctx.name), ) extension = \".exe\" if repository_ctx.os.name.lower().find(\"windows\") != -1 else", "there is no JDK \"virtual\" targets are created, which fail", "+ \"_version_setting\", \"//conditions:default\": name + \"_name_version_setting\", }), visibility = [\"//visibility:private\"],", "= None): \"\"\"Registers a runtime toolchain for local JDK and", "# This returns an indented list of properties separated with", "no JDK \"virtual\" targets are created, which fail only when", "= \"\" if repository_ctx.attr.build_file != None: build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "BUILD file using \"local_java_runtime\" macro build_file = \"\" if repository_ctx.attr.build_file", "java runtime target \"\"\" if runtime_name == None: runtime_name =", "len(parts) == 1: return major elif major == \"1\": #", "this rule. 
java_home: Location of the JDK imported. build_file: optionally", "may not use this file except in compliance with the", "version.isdigit() and int(version) > 8: for version in range(8, int(version)", "if not java_home_path.exists: fail('The path indicated by the \"java_home\" attribute", "a runtime toolchain for local JDK and creates an unregistered", "value set to either name or version argument. Java compile", "attribute \"%s\" (absolute: \"%s\") ' + \"does not exist.\" %", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "fail_rule( name = \"jdk\", header = \"Auto-Configuration Error:\", message =", "the same (local) JDK for compilation. If there is no", "name = \"jdk\", header = \"Auto-Configuration Error:\", message = (\"Cannot", "this file except in compliance with the License. # You", "repository_ctx.os.name.lower().find(\"windows\") != -1 else \"\" java_bin = java_home_path.get_child(\"bin\").get_child(\"java\" + extension)", "java_home: Path to the JDK. version: Version of the JDK.", "be applied to the java runtime target \"\"\" if runtime_name", "!= -1 else \"\" java_bin = java_home_path.get_child(\"bin\").get_child(\"java\" + extension) if", "for JDK8 than the newer versions. Args: name: name of", "%s, java_home = \"%s\", version = \"%s\", ) \"\"\" %", "\"\"\" % (repository_ctx.name, runtime_name, java_home, version) repository_ctx.file( \"BUILD.bazel\", 'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\", \"local_java_runtime\")\\n'", "repository_ctx.file( \"BUILD.bazel\", 'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\", \"local_java_runtime\")\\n' + build_file + local_java_runtime_macro, ) #", "same (local) JDK for compilation. 
If there is no JDK", "= name + \"_name_setting\", values = {\"java_runtime_version\": name}, visibility =", "= \"\", build_file = None): \"\"\"Registers a runtime toolchain for", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "native.config_setting( name = name + \"_version_setting\", values = {\"java_runtime_version\": version},", "_local_java_repository_impl, local = True, configure = True, attrs = {", ") extension = \".exe\" if repository_ctx.os.name.lower().find(\"windows\") != -1 else \"\"", "None local_java_runtime_macro = \"\"\" local_java_runtime( name = \"%s\", runtime_name =", "# # Licensed under the Apache License, Version 2.0 (the", "visibility = [\"//visibility:public\"]): \"\"\"Defines a java_runtime target together with Java", "Args: repository_ctx: repository context \"\"\" java_home = repository_ctx.attr.java_home java_home_path =", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Version of the JDK. 
runtime_name: name of java_runtime target if", "\"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = \":jdk\", ) ''' _local_java_repository_rule = repository_rule( implementation", "optionally java version \"\"\" _local_java_repository_rule(name = name, java_home = java_home,", "NOT EDIT: automatically generated WORKSPACE file for local_java_repository\\n\" + \"workspace(name", "\"workspace(name = \\\"{name}\\\")\\n\".format(name = repository_ctx.name), ) extension = \".exe\" if", "+ \"_\" + version}, visibility = [\"//visibility:private\"], ) native.alias( name", "java_home, version = version, build_file = build_file) native.register_toolchains(\"@\" + name", "# Java binary does not exist repository_ctx.file( \"BUILD.bazel\", _NOJDK_BUILD_TPL.format( local_jdk", "repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name = '\"jdk\"' if repository_ctx.attr.build_file else None local_java_runtime_macro =", "flags values between 8 and version (inclusive). Java compile toolchains", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "name + \"_toolchain_java8\", configuration = JVM8_TOOLCHAIN_CONFIGURATION, source_version = version, target_version", "{ \"java_home\": attr.string(), \"version\": attr.string(), \"build_file\": attr.label(), }, ) def", ") toolchain( name = \"runtime_toolchain_definition\", target_settings = [\":localjdk_setting\"], toolchain_type =", "and creates an unregistered compile toolchain. 
Toolchain resolution is constrained", "attr.string(), \"version\": attr.string(), \"build_file\": attr.label(), }, ) def local_java_repository(name, java_home,", "template, when JDK does not exist _NOJDK_BUILD_TPL = '''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\", \"fail_rule\")", "1.8 minor = parts[1] return minor return major def local_java_runtime(name,", "[\"//visibility:public\"]): \"\"\"Defines a java_runtime target together with Java runtime and", "build_file = \"\" if repository_ctx.attr.build_file != None: build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file))", "files for file in repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file, file.basename) # Build file", "constrained with --java_runtime_version flag having value of the \"name\" or", "correct your JAVA_HOME, \" + \"PATH or specify Java from", "the target. java_home: Path to the JDK. version: Version of", "visibility, ) native.config_setting( name = name + \"_name_setting\", values =", "= [property.strip() for property in properties_out.splitlines()] version_property = [property for", "rights reserved. # # Licensed under the Apache License, Version", "java_runtime = runtime_name, ) elif type(version) == type(\"\") and version.isdigit()", "the JDK. 
runtime_name: name of java_runtime target if it already", "def _detect_java_version(repository_ctx, java_bin): properties_out = repository_ctx.execute([java_bin, \"-XshowSettings:properties\"]).stderr # This returns", "Java runtime toolchain is constrained by flag --java_runtime_version having value", "+ \"_name_setting\": name + \"_name_setting\", name + \"_version_setting\": name +", "name + \"_name_version_setting\", values = {\"java_runtime_version\": name + \"_\" +", "+ extension) if not java_bin.exists: # Java binary does not", "java_home, visibility = visibility, ) native.config_setting( name = name +", "file template, when JDK does not exist _NOJDK_BUILD_TPL = '''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\",", "This requires a different configuration for JDK8 than the newer", "under the License. \"\"\"Rules for importing and registering a local", "= repository_ctx.name, java_binary = \"bin/java\" + extension, java_home = java_home,", "If there is no JDK \"virtual\" targets are created, which", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "return major elif major == \"1\": # handles versions below", "importing and registering a local JDK.\"\"\" load(\":default_java_toolchain.bzl\", \"JVM8_TOOLCHAIN_CONFIGURATION\", \"default_java_toolchain\") def", "elif type(version) == type(\"\") and version.isdigit() and int(version) > 8:", "8 and version (inclusive). Java compile toolchains use the same", "already exists. visibility: Visibility that will be applied to the", "name + \"_name_version_setting\", }), visibility = [\"//visibility:private\"], ) native.toolchain( name", "= [property for property in strip_properties if property.startswith(\"java.version = \")]", "properties_out.splitlines()] version_property = [property for property in strip_properties if property.startswith(\"java.version", "java_home = java_home, visibility = visibility, ) native.config_setting( name =", "actually needed. 
Args: name: A unique name for this rule.", "from remote repository (e.g. \" + \"--java_runtime_version=remotejdk_11\") ) config_setting( name", "= repository_ctx.path(java_home) if not java_home_path.exists: fail('The path indicated by the", "the \"name\" or \"version\" parameter. Java compile toolchains are created", "runtime_name = '\"jdk\"' if repository_ctx.attr.build_file else None local_java_runtime_macro = \"\"\"", "or implied. # See the License for the specific language", "in properties_out.splitlines()] version_property = [property for property in strip_properties if", "else _detect_java_version(repository_ctx, java_bin) # Prepare BUILD file using \"local_java_runtime\" macro", "+ version}, visibility = [\"//visibility:private\"], ) native.alias( name = name", "= \"runtime_toolchain_definition\", target_settings = [\":localjdk_setting\"], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain =", "None): \"\"\"Registers a runtime toolchain for local JDK and creates", "newer versions. Args: name: name of the target. java_home: Path", "for local JDK and creates an unregistered compile toolchain. Toolchain", "file in repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file, file.basename) # Build file template, when", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "= {\"java_runtime_version\": name + \"_\" + version}, visibility = [\"//visibility:private\"],", "and version.isdigit() and int(version) > 8: for version in range(8,", "with --java_runtime_version flag having value of the \"name\" or \"version\"", "[\"//visibility:private\"], ) native.config_setting( name = name + \"_name_version_setting\", values =", "_detect_java_version(repository_ctx, java_bin): properties_out = repository_ctx.execute([java_bin, \"-XshowSettings:properties\"]).stderr # This returns an", "name + \"_settings_alias\", actual = select({ name + \"_name_setting\": name", "\"%s\") ' + \"does not exist.\" % (java_home, str(java_home_path))) repository_ctx.file(", "compile toolchains use the same (local) JDK for compilation. If", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= parts[0] if len(parts) == 1: return major elif major", "Toolchain resolution is constrained with --java_runtime_version flag having value of", "= 11.0.8\\n\" # \" java.version.date = 2020-11-05\\\" strip_properties = [property.strip()", "= \"%s\", ) \"\"\" % (repository_ctx.name, runtime_name, java_home, version) repository_ctx.file(", "repository_ctx: repository context \"\"\" java_home = repository_ctx.attr.java_home java_home_path = repository_ctx.path(java_home)", "toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = runtime_name, ) if version ==", "\"\"\" _local_java_repository_rule(name = name, java_home = java_home, version = version,", "toolchain = runtime_name, ) if version == \"8\": default_java_toolchain( name", "java_bin) # Prepare BUILD file using \"local_java_runtime\" macro build_file =", "with newlines: # \" java.vendor.url.bug = ... \\n\" # \"", "to either name or version argument. 
Java compile toolchains are", "= [\"//visibility:private\"], ) native.toolchain( name = \"runtime_toolchain_definition\", target_settings = [\":%s_settings_alias\"", "binary does not exist repository_ctx.file( \"BUILD.bazel\", _NOJDK_BUILD_TPL.format( local_jdk = repository_ctx.name,", "config_setting( name = \"localjdk_setting\", values = {{\"java_runtime_version\": \"{local_jdk}\"}}, visibility =", "name + \"_version_setting\", \"//conditions:default\": name + \"_name_version_setting\", }), visibility =", "(the \"License\"); # you may not use this file except", "\"does not exist.\" % (java_home, str(java_home_path))) repository_ctx.file( \"WORKSPACE\", \"# DO", "# you may not use this file except in compliance", "\"_\" + version}, visibility = [\"//visibility:private\"], ) native.alias( name =", "_local_java_repository_rule = repository_rule( implementation = _local_java_repository_impl, local = True, configure", "resolution is constrained with --java_runtime_version flag having value of the", "values between 8 and version (inclusive). 
Java compile toolchains use", "2020-11-05\\\" strip_properties = [property.strip() for property in properties_out.splitlines()] version_property =", "repository_ctx.name, java_binary = \"bin/java\" + extension, java_home = java_home, ),", "(\"Cannot find Java binary {java_binary} in {java_home}; either correct your", "= { \"java_home\": attr.string(), \"version\": attr.string(), \"build_file\": attr.label(), }, )", "% (repository_ctx.name, runtime_name, java_home, version) repository_ctx.file( \"BUILD.bazel\", 'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\", \"local_java_runtime\")\\n' +", "the java runtime target \"\"\" if runtime_name == None: runtime_name", "str(java_home_path))) repository_ctx.file( \"WORKSPACE\", \"# DO NOT EDIT: automatically generated WORKSPACE", "= version, build_file = build_file) native.register_toolchains(\"@\" + name + \"//:runtime_toolchain_definition\")", "with Java runtime and compile toolchain definitions. Java runtime toolchain", "is constrained with --java_runtime_version flag having value of the \"name\"", "the \"java_home\" attribute \"%s\" (absolute: \"%s\") ' + \"does not", "visibility = [\"//visibility:private\"], ) native.toolchain( name = \"runtime_toolchain_definition\", target_settings =", "\" java.vendor.url.bug = ... \\n\" # \" java.version = 11.0.8\\n\"", "name = name + \"_name_version_setting\", values = {\"java_runtime_version\": name +", "Java runtime and compile toolchain definitions. Java runtime toolchain is", "# # Unless required by applicable law or agreed to", "name = \"runtime_toolchain_definition\", target_settings = [\":localjdk_setting\"], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain", "created, which fail only when actually needed. 
Args: name: A", "name, java_home = java_home, version = version, build_file = build_file)", "version}, visibility = [\"//visibility:private\"], ) native.alias( name = name +", "= name + \"_toolchain_java\" + str(version), source_version = str(version), target_version", "\"\"\"Repository rule local_java_repository implementation. Args: repository_ctx: repository context \"\"\" java_home", "!= None: build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name = '\"jdk\"' if repository_ctx.attr.build_file", "= repository_ctx.execute([java_bin, \"-XshowSettings:properties\"]).stderr # This returns an indented list of", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "else \"\" java_bin = java_home_path.get_child(\"bin\").get_child(\"java\" + extension) if not java_bin.exists:", "Version 2.0 (the \"License\"); # you may not use this", "\"_name_setting\", name + \"_version_setting\": name + \"_version_setting\", \"//conditions:default\": name +", "# \" java.version = 11.0.8\\n\" # \" java.version.date = 2020-11-05\\\"", "native.config_setting( name = name + \"_name_version_setting\", values = {\"java_runtime_version\": name", "# Prepare BUILD file using \"local_java_runtime\" macro build_file = \"\"", "\"\"\"Rules for importing and registering a local JDK.\"\"\" load(\":default_java_toolchain.bzl\", \"JVM8_TOOLCHAIN_CONFIGURATION\",", "_local_java_repository_impl(repository_ctx): \"\"\"Repository rule local_java_repository implementation. Args: repository_ctx: repository context \"\"\"", "automatically generated WORKSPACE file for local_java_repository\\n\" + \"workspace(name = \\\"{name}\\\")\\n\".format(name", "= \"\"\" local_java_runtime( name = \"%s\", runtime_name = %s, java_home", "name for this rule. 
java_home: Location of the JDK imported.", "runtime_name, ) # else version is not recognized and no", "def local_java_repository(name, java_home, version = \"\", build_file = None): \"\"\"Registers", "\"_name_setting\", values = {\"java_runtime_version\": name}, visibility = [\"//visibility:private\"], ) native.config_setting(", "to the java runtime target \"\"\" if runtime_name == None:", "target \"\"\" if runtime_name == None: runtime_name = name native.java_runtime(", "name], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = runtime_name, ) if version", "version, runtime_name = None, visibility = [\"//visibility:public\"]): \"\"\"Defines a java_runtime", "below 1.8 minor = parts[1] return minor return major def", "java_home_path.exists: fail('The path indicated by the \"java_home\" attribute \"%s\" (absolute:", "implied. # See the License for the specific language governing", "parts[1] return minor return major def local_java_runtime(name, java_home, version, runtime_name", "\\\"{name}\\\")\\n\".format(name = repository_ctx.name), ) extension = \".exe\" if repository_ctx.os.name.lower().find(\"windows\") !=", "a different configuration for JDK8 than the newer versions. Args:", "under the Apache License, Version 2.0 (the \"License\"); # you", "or \"version\" parameter. Java compile toolchains are created for --java_language_version", "language governing permissions and # limitations under the License. \"\"\"Rules", ") native.toolchain( name = \"runtime_toolchain_definition\", target_settings = [\":%s_settings_alias\" % name],", "\"\"\" local_java_runtime( name = \"%s\", runtime_name = %s, java_home =", "+ build_file + local_java_runtime_macro, ) # Symlink all files for", "is not recognized and no compilation toolchains are predefined def", "8: for version in range(8, int(version) + 1): default_java_toolchain( name", "same (local) JDK for compilation. 
This requires a different configuration", "repository_ctx.attr.version != \"\" else _detect_java_version(repository_ctx, java_bin) # Prepare BUILD file", "\"version\" parameter. Java compile toolchains are created for --java_language_version flags", "Location of the JDK imported. build_file: optionally BUILD file template", "JDK for compilation. This requires a different configuration for JDK8", "not java_bin.exists: # Java binary does not exist repository_ctx.file( \"BUILD.bazel\",", "All rights reserved. # # Licensed under the Apache License,", "visibility = [\"//visibility:private\"], ) native.alias( name = name + \"_settings_alias\",", "\"\" else _detect_java_version(repository_ctx, java_bin) # Prepare BUILD file using \"local_java_runtime\"", "by applicable law or agreed to in writing, software #", "or version argument. Java compile toolchains are created for --java_language_version", "version = \"\", build_file = None): \"\"\"Registers a runtime toolchain", "= name + \"_toolchain_java8\", configuration = JVM8_TOOLCHAIN_CONFIGURATION, source_version = version,", ") def local_java_repository(name, java_home, version = \"\", build_file = None):", "(local) JDK for compilation. This requires a different configuration for", "visibility: Visibility that will be applied to the java runtime", "recognized and no compilation toolchains are predefined def _local_java_repository_impl(repository_ctx): \"\"\"Repository", "remote repository (e.g. \" + \"--java_runtime_version=remotejdk_11\") ) config_setting( name =", "WORKSPACE file for local_java_repository\\n\" + \"workspace(name = \\\"{name}\\\")\\n\".format(name = repository_ctx.name),", "version = repository_ctx.attr.version if repository_ctx.attr.version != \"\" else _detect_java_version(repository_ctx, java_bin)", "name + \"_version_setting\": name + \"_version_setting\", \"//conditions:default\": name + \"_name_version_setting\",", "# limitations under the License. 
\"\"\"Rules for importing and registering", "\"--java_runtime_version=remotejdk_11\") ) config_setting( name = \"localjdk_setting\", values = {{\"java_runtime_version\": \"{local_jdk}\"}},", "for property in strip_properties if property.startswith(\"java.version = \")] if len(version_property)", "--java_language_version flags values between 8 and version (inclusive). Java compile", "macro build_file = \"\" if repository_ctx.attr.build_file != None: build_file =", "+ str(version), source_version = str(version), target_version = str(version), java_runtime =", "toolchains are predefined def _local_java_repository_impl(repository_ctx): \"\"\"Repository rule local_java_repository implementation. Args:", "if len(parts) == 1: return major elif major == \"1\":", "if version == \"8\": default_java_toolchain( name = name + \"_toolchain_java8\",", "local_java_runtime_macro = \"\"\" local_java_runtime( name = \"%s\", runtime_name = %s,", "together with Java runtime and compile toolchain definitions. Java runtime", "return major def local_java_runtime(name, java_home, version, runtime_name = None, visibility", "binary {java_binary} in {java_home}; either correct your JAVA_HOME, \" +", "Authors. All rights reserved. # # Licensed under the Apache", ") elif type(version) == type(\"\") and version.isdigit() and int(version) >", "= \"%s\", version = \"%s\", ) \"\"\" % (repository_ctx.name, runtime_name,", "+ \"_settings_alias\", actual = select({ name + \"_name_setting\": name +", "no compilation toolchains are predefined def _local_java_repository_impl(repository_ctx): \"\"\"Repository rule local_java_repository", "separated with newlines: # \" java.vendor.url.bug = ... \\n\" #", "for property in properties_out.splitlines()] version_property = [property for property in", "having value set to either name or version argument. 
Java", "= \"Auto-Configuration Error:\", message = (\"Cannot find Java binary {java_binary}", "\"Auto-Configuration Error:\", message = (\"Cannot find Java binary {java_binary} in", "native.java_runtime( name = runtime_name, java_home = java_home, visibility = visibility,", "= [\"//visibility:private\"], ) native.alias( name = name + \"_settings_alias\", actual", "+ \"_version_setting\": name + \"_version_setting\", \"//conditions:default\": name + \"_name_version_setting\", }),", "target_settings = [\":%s_settings_alias\" % name], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain =", "\"_version_setting\": name + \"_version_setting\", \"//conditions:default\": name + \"_name_version_setting\", }), visibility", "compile toolchains use the same (local) JDK for compilation. This", "= java_home_path.get_child(\"bin\").get_child(\"java\" + extension) if not java_bin.exists: # Java binary", "in repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file, file.basename) # Build file template, when JDK", "the same (local) JDK for compilation. This requires a different", "= ... \\n\" # \" java.version = 11.0.8\\n\" # \"", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", ") if version == \"8\": default_java_toolchain( name = name +", "= \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = runtime_name, ) if version == \"8\":", "Unless required by applicable law or agreed to in writing,", "int(version) + 1): default_java_toolchain( name = name + \"_toolchain_java\" +", "version}, visibility = [\"//visibility:private\"], ) native.config_setting( name = name +", "local_java_repository implementation. 
Args: repository_ctx: repository context \"\"\" java_home = repository_ctx.attr.java_home", "\"\"\" java_home = repository_ctx.attr.java_home java_home_path = repository_ctx.path(java_home) if not java_home_path.exists:", "name + \"_\" + version}, visibility = [\"//visibility:private\"], ) native.alias(", "the specific language governing permissions and # limitations under the", "Error:\", message = (\"Cannot find Java binary {java_binary} in {java_home};", "== 1: return major elif major == \"1\": # handles", "only when actually needed. Args: name: A unique name for", "name = name + \"_settings_alias\", actual = select({ name +", "version in range(8, int(version) + 1): default_java_toolchain( name = name", "= '\"jdk\"' if repository_ctx.attr.build_file else None local_java_runtime_macro = \"\"\" local_java_runtime(", "name native.java_runtime( name = runtime_name, java_home = java_home, visibility =", "applicable law or agreed to in writing, software # distributed", "native.toolchain( name = \"runtime_toolchain_definition\", target_settings = [\":%s_settings_alias\" % name], toolchain_type", "_detect_java_version(repository_ctx, java_bin) # Prepare BUILD file using \"local_java_runtime\" macro build_file", "+ \"_version_setting\", values = {\"java_runtime_version\": version}, visibility = [\"//visibility:private\"], )", "= repository_ctx.attr.version if repository_ctx.attr.version != \"\" else _detect_java_version(repository_ctx, java_bin) #", "= \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = \":jdk\", ) ''' _local_java_repository_rule = repository_rule(", "not recognized and no compilation toolchains are predefined def _local_java_repository_impl(repository_ctx):", "\"%s\" (absolute: \"%s\") ' + \"does not exist.\" % (java_home,", "+ \"does not exist.\" % (java_home, str(java_home_path))) repository_ctx.file( \"WORKSPACE\", \"#", "\"local_java_runtime\" macro build_file = \"\" if repository_ctx.attr.build_file != None: build_file", "= 
{\"java_runtime_version\": version}, visibility = [\"//visibility:private\"], ) native.config_setting( name =", "name = name + \"_name_setting\", values = {\"java_runtime_version\": name}, visibility", "repository_ctx.attr.version if repository_ctx.attr.version != \"\" else _detect_java_version(repository_ctx, java_bin) # Prepare", "of the JDK imported. build_file: optionally BUILD file template version:", "name + \"_name_setting\", values = {\"java_runtime_version\": name}, visibility = [\"//visibility:private\"],", "in writing, software # distributed under the License is distributed", ") \"\"\" % (repository_ctx.name, runtime_name, java_home, version) repository_ctx.file( \"BUILD.bazel\", 'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\",", "source_version = str(version), target_version = str(version), java_runtime = runtime_name, )", "target together with Java runtime and compile toolchain definitions. Java", ") # Symlink all files for file in repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file,", "= True, attrs = { \"java_home\": attr.string(), \"version\": attr.string(), \"build_file\":", "EDIT: automatically generated WORKSPACE file for local_java_repository\\n\" + \"workspace(name =", "None version_value = version_property[0][len(\"java.version = \"):] parts = version_value.split(\".\") major", "Bazel Authors. All rights reserved. 
# # Licensed under the", "= name + \"_version_setting\", values = {\"java_runtime_version\": version}, visibility =", "repository_ctx.path(java_home) if not java_home_path.exists: fail('The path indicated by the \"java_home\"", "= \\\"{name}\\\")\\n\".format(name = repository_ctx.name), ) extension = \".exe\" if repository_ctx.os.name.lower().find(\"windows\")", "build_file: optionally BUILD file template version: optionally java version \"\"\"", "Symlink all files for file in repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file, file.basename) #", "= [\":localjdk_setting\"], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = \":jdk\", ) '''", "version_property = [property for property in strip_properties if property.startswith(\"java.version =", "versions below 1.8 minor = parts[1] return minor return major", "% (java_home, str(java_home_path))) repository_ctx.file( \"WORKSPACE\", \"# DO NOT EDIT: automatically", "repository_ctx.file( \"BUILD.bazel\", _NOJDK_BUILD_TPL.format( local_jdk = repository_ctx.name, java_binary = \"bin/java\" +", "toolchains use the same (local) JDK for compilation. This requires", "True, attrs = { \"java_home\": attr.string(), \"version\": attr.string(), \"build_file\": attr.label(),", "targets are created, which fail only when actually needed. Args:", "Java compile toolchains are created for --java_language_version flags values between", "it already exists. visibility: Visibility that will be applied to", "runtime toolchain for local JDK and creates an unregistered compile", "runtime_name, ) if version == \"8\": default_java_toolchain( name = name", "_NOJDK_BUILD_TPL.format( local_jdk = repository_ctx.name, java_binary = \"bin/java\" + extension, java_home", "\"runtime_toolchain_definition\", target_settings = [\":localjdk_setting\"], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain = \":jdk\",", "limitations under the License. 
\"\"\"Rules for importing and registering a", "= [\"//visibility:private\"], ) native.config_setting( name = name + \"_version_setting\", values", "BUILD file template version: optionally java version \"\"\" _local_java_repository_rule(name =", "+ \"workspace(name = \\\"{name}\\\")\\n\".format(name = repository_ctx.name), ) extension = \".exe\"", "compilation. If there is no JDK \"virtual\" targets are created,", "not exist _NOJDK_BUILD_TPL = '''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\", \"fail_rule\") fail_rule( name = \"jdk\",", "java_home, version) repository_ctx.file( \"BUILD.bazel\", 'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\", \"local_java_runtime\")\\n' + build_file + local_java_runtime_macro,", "fail only when actually needed. Args: name: A unique name", "+ extension, java_home = java_home, ), False, ) return #", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "indented list of properties separated with newlines: # \" java.vendor.url.bug", "that will be applied to the java runtime target \"\"\"", "License, Version 2.0 (the \"License\"); # you may not use", "}), visibility = [\"//visibility:private\"], ) native.toolchain( name = \"runtime_toolchain_definition\", target_settings", "indicated by the \"java_home\" attribute \"%s\" (absolute: \"%s\") ' +", "toolchain for local JDK and creates an unregistered compile toolchain.", "rule. java_home: Location of the JDK imported. build_file: optionally BUILD", "# You may obtain a copy of the License at", "java_binary = \"bin/java\" + extension, java_home = java_home, ), False,", "java version \"\"\" _local_java_repository_rule(name = name, java_home = java_home, version", "file using \"local_java_runtime\" macro build_file = \"\" if repository_ctx.attr.build_file !=", "\":jdk\", ) ''' _local_java_repository_rule = repository_rule( implementation = _local_java_repository_impl, local", "the newer versions. Args: name: name of the target. 
java_home:", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= \")] if len(version_property) != 1: return None version_value =", "== \"1\": # handles versions below 1.8 minor = parts[1]", "runtime toolchain is constrained by flag --java_runtime_version having value set", "}, ) def local_java_repository(name, java_home, version = \"\", build_file =", "requires a different configuration for JDK8 than the newer versions.", "java_home, ), False, ) return # Detect version version =", "!= \"\" else _detect_java_version(repository_ctx, java_bin) # Prepare BUILD file using", "Java compile toolchains use the same (local) JDK for compilation.", "exist.\" % (java_home, str(java_home_path))) repository_ctx.file( \"WORKSPACE\", \"# DO NOT EDIT:", "= [\"//visibility:private\"], ) native.config_setting( name = name + \"_name_version_setting\", values", "local_jdk = repository_ctx.name, java_binary = \"bin/java\" + extension, java_home =", "the License. \"\"\"Rules for importing and registering a local JDK.\"\"\"", "version_value = version_property[0][len(\"java.version = \"):] parts = version_value.split(\".\") major =", "name = \"localjdk_setting\", values = {{\"java_runtime_version\": \"{local_jdk}\"}}, visibility = [\"//visibility:private\"],", "(e.g. \" + \"--java_runtime_version=remotejdk_11\") ) config_setting( name = \"localjdk_setting\", values", "\"-XshowSettings:properties\"]).stderr # This returns an indented list of properties separated", "repository context \"\"\" java_home = repository_ctx.attr.java_home java_home_path = repository_ctx.path(java_home) if", "repository_ctx.execute([java_bin, \"-XshowSettings:properties\"]).stderr # This returns an indented list of properties", "if repository_ctx.os.name.lower().find(\"windows\") != -1 else \"\" java_bin = java_home_path.get_child(\"bin\").get_child(\"java\" +", "= True, configure = True, attrs = { \"java_home\": attr.string(),", "toolchains use the same (local) JDK for compilation. 
If there", "the License for the specific language governing permissions and #", "name = name + \"_version_setting\", values = {\"java_runtime_version\": version}, visibility", "java_bin): properties_out = repository_ctx.execute([java_bin, \"-XshowSettings:properties\"]).stderr # This returns an indented", "Apache License, Version 2.0 (the \"License\"); # you may not", "java_home = \"%s\", version = \"%s\", ) \"\"\" % (repository_ctx.name,", "major = parts[0] if len(parts) == 1: return major elif", "either express or implied. # See the License for the", "def _local_java_repository_impl(repository_ctx): \"\"\"Repository rule local_java_repository implementation. Args: repository_ctx: repository context", "toolchain = \":jdk\", ) ''' _local_java_repository_rule = repository_rule( implementation =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "source_version = version, target_version = version, java_runtime = runtime_name, )", "\")] if len(version_property) != 1: return None version_value = version_property[0][len(\"java.version", "your JAVA_HOME, \" + \"PATH or specify Java from remote", "len(version_property) != 1: return None version_value = version_property[0][len(\"java.version = \"):]", "= '''load(\"@bazel_tools//tools/jdk:fail_rule.bzl\", \"fail_rule\") fail_rule( name = \"jdk\", header = \"Auto-Configuration", "Detect version version = repository_ctx.attr.version if repository_ctx.attr.version != \"\" else", "\"PATH or specify Java from remote repository (e.g. \" +", "= \"runtime_toolchain_definition\", target_settings = [\":%s_settings_alias\" % name], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",", "= version_property[0][len(\"java.version = \"):] parts = version_value.split(\".\") major = parts[0]", "version (inclusive). 
Java compile toolchains use the same (local) JDK", "+ 1): default_java_toolchain( name = name + \"_toolchain_java\" + str(version),", "= version_value.split(\".\") major = parts[0] if len(parts) == 1: return", "will be applied to the java runtime target \"\"\" if", "repository_ctx.file( \"WORKSPACE\", \"# DO NOT EDIT: automatically generated WORKSPACE file", "<reponame>loongarch64/bazel # Copyright 2020 The Bazel Authors. All rights reserved.", "+ \"PATH or specify Java from remote repository (e.g. \"", "version_property[0][len(\"java.version = \"):] parts = version_value.split(\".\") major = parts[0] if", "\"version\": attr.string(), \"build_file\": attr.label(), }, ) def local_java_repository(name, java_home, version", "JDK imported. build_file: optionally BUILD file template version: optionally java", "java_home_path = repository_ctx.path(java_home) if not java_home_path.exists: fail('The path indicated by", "\"WORKSPACE\", \"# DO NOT EDIT: automatically generated WORKSPACE file for", "\"runtime_toolchain_definition\", target_settings = [\":%s_settings_alias\" % name], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\", toolchain", "use the same (local) JDK for compilation. If there is", "default_java_toolchain( name = name + \"_toolchain_java8\", configuration = JVM8_TOOLCHAIN_CONFIGURATION, source_version", "if repository_ctx.attr.build_file != None: build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name = '\"jdk\"'", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "java_home = repository_ctx.attr.java_home java_home_path = repository_ctx.path(java_home) if not java_home_path.exists: fail('The", "toolchain( name = \"runtime_toolchain_definition\", target_settings = [\":localjdk_setting\"], toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",", "version_value.split(\".\") major = parts[0] if len(parts) == 1: return major", "Args: name: name of the target. 
java_home: Path to the", "\"_name_setting\": name + \"_name_setting\", name + \"_version_setting\": name + \"_version_setting\",", "= [\"//visibility:public\"]): \"\"\"Defines a java_runtime target together with Java runtime", "\"fail_rule\") fail_rule( name = \"jdk\", header = \"Auto-Configuration Error:\", message", "and version (inclusive). Java compile toolchains use the same (local)", "having value of the \"name\" or \"version\" parameter. Java compile", "configuration for JDK8 than the newer versions. Args: name: name", "java_home_path.get_child(\"bin\").get_child(\"java\" + extension) if not java_bin.exists: # Java binary does", "the JDK. version: Version of the JDK. runtime_name: name of", "an indented list of properties separated with newlines: # \"", "attr.label(), }, ) def local_java_repository(name, java_home, version = \"\", build_file", "target_version = version, java_runtime = runtime_name, ) elif type(version) ==", "predefined def _local_java_repository_impl(repository_ctx): \"\"\"Repository rule local_java_repository implementation. Args: repository_ctx: repository", "name = \"%s\", runtime_name = %s, java_home = \"%s\", version", "The Bazel Authors. All rights reserved. # # Licensed under", "name or version argument. Java compile toolchains are created for", "permissions and # limitations under the License. \"\"\"Rules for importing", "applied to the java runtime target \"\"\" if runtime_name ==", "version = version, build_file = build_file) native.register_toolchains(\"@\" + name +", "11.0.8\\n\" # \" java.version.date = 2020-11-05\\\" strip_properties = [property.strip() for", ") ''' _local_java_repository_rule = repository_rule( implementation = _local_java_repository_impl, local =", "return None version_value = version_property[0][len(\"java.version = \"):] parts = version_value.split(\".\")", "JDK and creates an unregistered compile toolchain. 
Toolchain resolution is", "\"License\"); # you may not use this file except in", "= runtime_name, java_home = java_home, visibility = visibility, ) native.config_setting(", "return # Detect version version = repository_ctx.attr.version if repository_ctx.attr.version !=", "java_home: Location of the JDK imported. build_file: optionally BUILD file", "of the JDK. runtime_name: name of java_runtime target if it", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "and registering a local JDK.\"\"\" load(\":default_java_toolchain.bzl\", \"JVM8_TOOLCHAIN_CONFIGURATION\", \"default_java_toolchain\") def _detect_java_version(repository_ctx,", "name = runtime_name, java_home = java_home, visibility = visibility, )", "\"\"\"Defines a java_runtime target together with Java runtime and compile", "local = True, configure = True, attrs = { \"java_home\":", "== None: runtime_name = name native.java_runtime( name = runtime_name, java_home", "(repository_ctx.name, runtime_name, java_home, version) repository_ctx.file( \"BUILD.bazel\", 'load(\"@bazel_tools//tools/jdk:local_java_repository.bzl\", \"local_java_runtime\")\\n' + build_file", "\"_name_version_setting\", }), visibility = [\"//visibility:private\"], ) native.toolchain( name = \"runtime_toolchain_definition\",", "version \"\"\" _local_java_repository_rule(name = name, java_home = java_home, version =", "JVM8_TOOLCHAIN_CONFIGURATION, source_version = version, target_version = version, java_runtime = runtime_name,", "# distributed under the License is distributed on an \"AS", "for this rule. java_home: Location of the JDK imported. build_file:", "governing permissions and # limitations under the License. \"\"\"Rules for", "# Unless required by applicable law or agreed to in", "Java from remote repository (e.g. \" + \"--java_runtime_version=remotejdk_11\") ) config_setting(", "are predefined def _local_java_repository_impl(repository_ctx): \"\"\"Repository rule local_java_repository implementation. 
Args: repository_ctx:", "name + \"_name_setting\": name + \"_name_setting\", name + \"_version_setting\": name", "[\"//visibility:private\"], ) native.config_setting( name = name + \"_version_setting\", values =", "Java binary {java_binary} in {java_home}; either correct your JAVA_HOME, \"", "handles versions below 1.8 minor = parts[1] return minor return", "\"_toolchain_java\" + str(version), source_version = str(version), target_version = str(version), java_runtime", "imported. build_file: optionally BUILD file template version: optionally java version", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "\"bin/java\" + extension, java_home = java_home, ), False, ) return", "Build file template, when JDK does not exist _NOJDK_BUILD_TPL =", "{\"java_runtime_version\": name + \"_\" + version}, visibility = [\"//visibility:private\"], )", "visibility = [\"//visibility:private\"], ) toolchain( name = \"runtime_toolchain_definition\", target_settings =", "template version: optionally java version \"\"\" _local_java_repository_rule(name = name, java_home", "\" + \"--java_runtime_version=remotejdk_11\") ) config_setting( name = \"localjdk_setting\", values =", "= repository_ctx.attr.java_home java_home_path = repository_ctx.path(java_home) if not java_home_path.exists: fail('The path", "\"\"\"Registers a runtime toolchain for local JDK and creates an", "= repository_ctx.name), ) extension = \".exe\" if repository_ctx.os.name.lower().find(\"windows\") != -1", "You may obtain a copy of the License at #", "extension) if not java_bin.exists: # Java binary does not exist", "name: name of the target. java_home: Path to the JDK.", "of the target. java_home: Path to the JDK. version: Version", "for importing and registering a local JDK.\"\"\" load(\":default_java_toolchain.bzl\", \"JVM8_TOOLCHAIN_CONFIGURATION\", \"default_java_toolchain\")", "set to either name or version argument. 
Java compile toolchains", "\" + \"PATH or specify Java from remote repository (e.g.", "values = {\"java_runtime_version\": name}, visibility = [\"//visibility:private\"], ) native.config_setting( name", "elif major == \"1\": # handles versions below 1.8 minor", "= %s, java_home = \"%s\", version = \"%s\", ) \"\"\"", "local JDK.\"\"\" load(\":default_java_toolchain.bzl\", \"JVM8_TOOLCHAIN_CONFIGURATION\", \"default_java_toolchain\") def _detect_java_version(repository_ctx, java_bin): properties_out =", "is constrained by flag --java_runtime_version having value set to either", "if not java_bin.exists: # Java binary does not exist repository_ctx.file(", "the Apache License, Version 2.0 (the \"License\"); # you may", "= name + \"_name_version_setting\", values = {\"java_runtime_version\": name + \"_\"", "java_runtime target if it already exists. visibility: Visibility that will", "values = {{\"java_runtime_version\": \"{local_jdk}\"}}, visibility = [\"//visibility:private\"], ) toolchain( name", "is no JDK \"virtual\" targets are created, which fail only", "native.config_setting( name = name + \"_name_setting\", values = {\"java_runtime_version\": name}," ]
[ "fdt = FixtureDataType.get(fdi.data_type_id) fdi.fixture_type = fdt.tag return fdi except ResourceNotFound:", "type_tag).one() fdis = list(FixtureDataItem.by_data_type(domain, type_id)) else: fdis = list(FixtureDataItem.by_domain(domain)) return", "= tp_f.CharField(attribute='_id', readonly=True, unique=True) def obj_get(self, bundle, **kwargs): return convert_fdt(get_object_or_not_exist(", "parent_ref_name and child_type and references: parent_fdi = FixtureDataItem.get(parent_id) fdis =", "else: fdis = list(FixtureDataItem.by_domain(domain)) return [convert_fdt(fdi) for fdi in fdis]", "import JsonResource from corehq.apps.api.resources.v0_1 import ( CustomResourceMeta, RequirePermissionAuthentication, ) from", "child_type, parent_ref_name, parent_fdi.fields_without_attributes[references]) ) elif type_id or type_tag: type_id =", "Permissions def convert_fdt(fdi): try: fdt = FixtureDataType.get(fdi.data_type_id) fdi.fixture_type = fdt.tag", "None) child_type = bundle.request.GET.get(\"child_type\", None) type_id = bundle.request.GET.get(\"fixture_type_id\", None) type_tag", "bundle, **kwargs): domain = kwargs['domain'] parent_id = bundle.request.GET.get(\"parent_id\", None) parent_ref_name", "means the ref'd fixture type was not found fixture_type =", "readonly=True, null=True) id = tp_f.CharField(attribute='_id', readonly=True, unique=True) def obj_get(self, bundle,", ") from corehq.apps.api.util import get_object_or_not_exist from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType", "kwargs['domain'] parent_id = bundle.request.GET.get(\"parent_id\", None) parent_ref_name = bundle.request.GET.get(\"parent_ref_name\", None) references", "except ResourceNotFound: return fdi class FixtureResource(JsonResource): type = \"fixture\" fields", "authentication = RequirePermissionAuthentication(Permissions.edit_apps) object_class = FixtureDataItem resource_name = 'fixture' limit", "return [convert_fdt(fdi) for fdi in fdis] or [] class 
Meta(CustomResourceMeta):", "tastypie import fields as tp_f from corehq.apps.api.resources import JsonResource from", "null, that means the ref'd fixture type was not found", "FixtureDataItem, kwargs['pk'], kwargs['domain'])) def obj_get_list(self, bundle, **kwargs): domain = kwargs['domain']", "corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType from corehq.apps.users.models import Permissions def convert_fdt(fdi):", "import fields as tp_f from corehq.apps.api.resources import JsonResource from corehq.apps.api.resources.v0_1", "type_tag: type_id = type_id or FixtureDataType.by_domain_tag( domain, type_tag).one() fdis =", "= list(FixtureDataItem.by_domain(domain)) return [convert_fdt(fdi) for fdi in fdis] or []", "= bundle.request.GET.get(\"fixture_type\", None) if parent_id and parent_ref_name and child_type and", "def convert_fdt(fdi): try: fdt = FixtureDataType.get(fdi.data_type_id) fdi.fixture_type = fdt.tag return", "get_object_or_not_exist from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType from corehq.apps.users.models import Permissions", "id = tp_f.CharField(attribute='_id', readonly=True, unique=True) def obj_get(self, bundle, **kwargs): return", "ResourceNotFound: return fdi class FixtureResource(JsonResource): type = \"fixture\" fields =", "= list( FixtureDataItem.by_field_value( domain, child_type, parent_ref_name, parent_fdi.fields_without_attributes[references]) ) elif type_id", "FixtureDataItem, FixtureDataType from corehq.apps.users.models import Permissions def convert_fdt(fdi): try: fdt", "list(FixtureDataItem.by_domain(domain)) return [convert_fdt(fdi) for fdi in fdis] or [] class", "fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True, null=True) id = tp_f.CharField(attribute='_id', readonly=True, unique=True)", "or [] class Meta(CustomResourceMeta): authentication = RequirePermissionAuthentication(Permissions.edit_apps) object_class = FixtureDataItem", "from corehq.apps.fixtures.models 
import FixtureDataItem, FixtureDataType from corehq.apps.users.models import Permissions def", "from corehq.apps.api.resources import JsonResource from corehq.apps.api.resources.v0_1 import ( CustomResourceMeta, RequirePermissionAuthentication,", "or FixtureDataType.by_domain_tag( domain, type_tag).one() fdis = list(FixtureDataItem.by_data_type(domain, type_id)) else: fdis", "domain, type_tag).one() fdis = list(FixtureDataItem.by_data_type(domain, type_id)) else: fdis = list(FixtureDataItem.by_domain(domain))", "FixtureDataType.get(fdi.data_type_id) fdi.fixture_type = fdt.tag return fdi except ResourceNotFound: return fdi", "bundle.request.GET.get(\"fixture_type_id\", None) type_tag = bundle.request.GET.get(\"fixture_type\", None) if parent_id and parent_ref_name", "the ref'd fixture type was not found fixture_type = tp_f.CharField(attribute='fixture_type',", "tp_f from corehq.apps.api.resources import JsonResource from corehq.apps.api.resources.v0_1 import ( CustomResourceMeta,", "domain = kwargs['domain'] parent_id = bundle.request.GET.get(\"parent_id\", None) parent_ref_name = bundle.request.GET.get(\"parent_ref_name\",", "list( FixtureDataItem.by_field_value( domain, child_type, parent_ref_name, parent_fdi.fields_without_attributes[references]) ) elif type_id or", "None) parent_ref_name = bundle.request.GET.get(\"parent_ref_name\", None) references = bundle.request.GET.get(\"references\", None) child_type", "= tp_f.DictField(attribute='try_fields_without_attributes', readonly=True, unique=True) # when null, that means the", "return fdi class FixtureResource(JsonResource): type = \"fixture\" fields = tp_f.DictField(attribute='try_fields_without_attributes',", "ref'd fixture type was not found fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True,", "bundle, **kwargs): return convert_fdt(get_object_or_not_exist( FixtureDataItem, kwargs['pk'], kwargs['domain'])) def obj_get_list(self, bundle,", "for fdi in fdis] or [] class 
Meta(CustomResourceMeta): authentication =", "fdis] or [] class Meta(CustomResourceMeta): authentication = RequirePermissionAuthentication(Permissions.edit_apps) object_class =", "import Permissions def convert_fdt(fdi): try: fdt = FixtureDataType.get(fdi.data_type_id) fdi.fixture_type =", "elif type_id or type_tag: type_id = type_id or FixtureDataType.by_domain_tag( domain,", "= bundle.request.GET.get(\"parent_id\", None) parent_ref_name = bundle.request.GET.get(\"parent_ref_name\", None) references = bundle.request.GET.get(\"references\",", "null=True) id = tp_f.CharField(attribute='_id', readonly=True, unique=True) def obj_get(self, bundle, **kwargs):", "parent_fdi.fields_without_attributes[references]) ) elif type_id or type_tag: type_id = type_id or", "child_type = bundle.request.GET.get(\"child_type\", None) type_id = bundle.request.GET.get(\"fixture_type_id\", None) type_tag =", "= type_id or FixtureDataType.by_domain_tag( domain, type_tag).one() fdis = list(FixtureDataItem.by_data_type(domain, type_id))", "= bundle.request.GET.get(\"parent_ref_name\", None) references = bundle.request.GET.get(\"references\", None) child_type = bundle.request.GET.get(\"child_type\",", "= bundle.request.GET.get(\"references\", None) child_type = bundle.request.GET.get(\"child_type\", None) type_id = bundle.request.GET.get(\"fixture_type_id\",", "import get_object_or_not_exist from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType from corehq.apps.users.models import", "list(FixtureDataItem.by_data_type(domain, type_id)) else: fdis = list(FixtureDataItem.by_domain(domain)) return [convert_fdt(fdi) for fdi", "was not found fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True, null=True) id =", "tp_f.CharField(attribute='_id', readonly=True, unique=True) def obj_get(self, bundle, **kwargs): return convert_fdt(get_object_or_not_exist( FixtureDataItem,", "type_id = bundle.request.GET.get(\"fixture_type_id\", None) type_tag = 
bundle.request.GET.get(\"fixture_type\", None) if parent_id", "type = \"fixture\" fields = tp_f.DictField(attribute='try_fields_without_attributes', readonly=True, unique=True) # when", "FixtureResource(JsonResource): type = \"fixture\" fields = tp_f.DictField(attribute='try_fields_without_attributes', readonly=True, unique=True) #", "\"fixture\" fields = tp_f.DictField(attribute='try_fields_without_attributes', readonly=True, unique=True) # when null, that", "convert_fdt(get_object_or_not_exist( FixtureDataItem, kwargs['pk'], kwargs['domain'])) def obj_get_list(self, bundle, **kwargs): domain =", "= FixtureDataItem.get(parent_id) fdis = list( FixtureDataItem.by_field_value( domain, child_type, parent_ref_name, parent_fdi.fields_without_attributes[references])", "class FixtureResource(JsonResource): type = \"fixture\" fields = tp_f.DictField(attribute='try_fields_without_attributes', readonly=True, unique=True)", ") elif type_id or type_tag: type_id = type_id or FixtureDataType.by_domain_tag(", "fdis = list( FixtureDataItem.by_field_value( domain, child_type, parent_ref_name, parent_fdi.fields_without_attributes[references]) ) elif", "**kwargs): return convert_fdt(get_object_or_not_exist( FixtureDataItem, kwargs['pk'], kwargs['domain'])) def obj_get_list(self, bundle, **kwargs):", "kwargs['pk'], kwargs['domain'])) def obj_get_list(self, bundle, **kwargs): domain = kwargs['domain'] parent_id", "child_type and references: parent_fdi = FixtureDataItem.get(parent_id) fdis = list( FixtureDataItem.by_field_value(", "fdis = list(FixtureDataItem.by_data_type(domain, type_id)) else: fdis = list(FixtureDataItem.by_domain(domain)) return [convert_fdt(fdi)", "convert_fdt(fdi): try: fdt = FixtureDataType.get(fdi.data_type_id) fdi.fixture_type = fdt.tag return fdi", "found fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True, null=True) id = tp_f.CharField(attribute='_id', readonly=True,", "class Meta(CustomResourceMeta): authentication = 
RequirePermissionAuthentication(Permissions.edit_apps) object_class = FixtureDataItem resource_name =", "type_id = type_id or FixtureDataType.by_domain_tag( domain, type_tag).one() fdis = list(FixtureDataItem.by_data_type(domain,", "as tp_f from corehq.apps.api.resources import JsonResource from corehq.apps.api.resources.v0_1 import (", "parent_fdi = FixtureDataItem.get(parent_id) fdis = list( FixtureDataItem.by_field_value( domain, child_type, parent_ref_name,", "tp_f.DictField(attribute='try_fields_without_attributes', readonly=True, unique=True) # when null, that means the ref'd", "None) type_tag = bundle.request.GET.get(\"fixture_type\", None) if parent_id and parent_ref_name and", "= kwargs['domain'] parent_id = bundle.request.GET.get(\"parent_id\", None) parent_ref_name = bundle.request.GET.get(\"parent_ref_name\", None)", "corehq.apps.api.resources import JsonResource from corehq.apps.api.resources.v0_1 import ( CustomResourceMeta, RequirePermissionAuthentication, )", "try: fdt = FixtureDataType.get(fdi.data_type_id) fdi.fixture_type = fdt.tag return fdi except", "FixtureDataItem.get(parent_id) fdis = list( FixtureDataItem.by_field_value( domain, child_type, parent_ref_name, parent_fdi.fields_without_attributes[references]) )", "# when null, that means the ref'd fixture type was", "bundle.request.GET.get(\"fixture_type\", None) if parent_id and parent_ref_name and child_type and references:", "parent_id = bundle.request.GET.get(\"parent_id\", None) parent_ref_name = bundle.request.GET.get(\"parent_ref_name\", None) references =", "= FixtureDataType.get(fdi.data_type_id) fdi.fixture_type = fdt.tag return fdi except ResourceNotFound: return", "return fdi except ResourceNotFound: return fdi class FixtureResource(JsonResource): type =", "and child_type and references: parent_fdi = FixtureDataItem.get(parent_id) fdis = list(", "from tastypie import fields as tp_f from corehq.apps.api.resources import JsonResource", "from couchdbkit import ResourceNotFound from 
tastypie import fields as tp_f", "RequirePermissionAuthentication(Permissions.edit_apps) object_class = FixtureDataItem resource_name = 'fixture' limit = 0", "unique=True) def obj_get(self, bundle, **kwargs): return convert_fdt(get_object_or_not_exist( FixtureDataItem, kwargs['pk'], kwargs['domain']))", "import FixtureDataItem, FixtureDataType from corehq.apps.users.models import Permissions def convert_fdt(fdi): try:", "fields = tp_f.DictField(attribute='try_fields_without_attributes', readonly=True, unique=True) # when null, that means", "None) type_id = bundle.request.GET.get(\"fixture_type_id\", None) type_tag = bundle.request.GET.get(\"fixture_type\", None) if", "if parent_id and parent_ref_name and child_type and references: parent_fdi =", "type was not found fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True, null=True) id", "FixtureDataType.by_domain_tag( domain, type_tag).one() fdis = list(FixtureDataItem.by_data_type(domain, type_id)) else: fdis =", "= list(FixtureDataItem.by_data_type(domain, type_id)) else: fdis = list(FixtureDataItem.by_domain(domain)) return [convert_fdt(fdi) for", "= \"fixture\" fields = tp_f.DictField(attribute='try_fields_without_attributes', readonly=True, unique=True) # when null,", "ResourceNotFound from tastypie import fields as tp_f from corehq.apps.api.resources import", "None) references = bundle.request.GET.get(\"references\", None) child_type = bundle.request.GET.get(\"child_type\", None) type_id", "corehq.apps.users.models import Permissions def convert_fdt(fdi): try: fdt = FixtureDataType.get(fdi.data_type_id) fdi.fixture_type", "fixture type was not found fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True, null=True)", "fdi in fdis] or [] class Meta(CustomResourceMeta): authentication = RequirePermissionAuthentication(Permissions.edit_apps)", "bundle.request.GET.get(\"parent_ref_name\", None) references = bundle.request.GET.get(\"references\", None) child_type = 
bundle.request.GET.get(\"child_type\", None)", "fdt.tag return fdi except ResourceNotFound: return fdi class FixtureResource(JsonResource): type", "( CustomResourceMeta, RequirePermissionAuthentication, ) from corehq.apps.api.util import get_object_or_not_exist from corehq.apps.fixtures.models", "parent_ref_name = bundle.request.GET.get(\"parent_ref_name\", None) references = bundle.request.GET.get(\"references\", None) child_type =", "= bundle.request.GET.get(\"fixture_type_id\", None) type_tag = bundle.request.GET.get(\"fixture_type\", None) if parent_id and", "references = bundle.request.GET.get(\"references\", None) child_type = bundle.request.GET.get(\"child_type\", None) type_id =", "type_tag = bundle.request.GET.get(\"fixture_type\", None) if parent_id and parent_ref_name and child_type", "import ( CustomResourceMeta, RequirePermissionAuthentication, ) from corehq.apps.api.util import get_object_or_not_exist from", "and references: parent_fdi = FixtureDataItem.get(parent_id) fdis = list( FixtureDataItem.by_field_value( domain,", "or type_tag: type_id = type_id or FixtureDataType.by_domain_tag( domain, type_tag).one() fdis", "JsonResource from corehq.apps.api.resources.v0_1 import ( CustomResourceMeta, RequirePermissionAuthentication, ) from corehq.apps.api.util", "= tp_f.CharField(attribute='fixture_type', readonly=True, null=True) id = tp_f.CharField(attribute='_id', readonly=True, unique=True) def", "fdi.fixture_type = fdt.tag return fdi except ResourceNotFound: return fdi class", "FixtureDataItem.by_field_value( domain, child_type, parent_ref_name, parent_fdi.fields_without_attributes[references]) ) elif type_id or type_tag:", "in fdis] or [] class Meta(CustomResourceMeta): authentication = RequirePermissionAuthentication(Permissions.edit_apps) object_class", "fdis = list(FixtureDataItem.by_domain(domain)) return [convert_fdt(fdi) for fdi in fdis] or", "parent_id and parent_ref_name and child_type and references: parent_fdi = 
FixtureDataItem.get(parent_id)", "def obj_get(self, bundle, **kwargs): return convert_fdt(get_object_or_not_exist( FixtureDataItem, kwargs['pk'], kwargs['domain'])) def", "parent_ref_name, parent_fdi.fields_without_attributes[references]) ) elif type_id or type_tag: type_id = type_id", "corehq.apps.api.util import get_object_or_not_exist from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType from corehq.apps.users.models", "from corehq.apps.users.models import Permissions def convert_fdt(fdi): try: fdt = FixtureDataType.get(fdi.data_type_id)", "obj_get(self, bundle, **kwargs): return convert_fdt(get_object_or_not_exist( FixtureDataItem, kwargs['pk'], kwargs['domain'])) def obj_get_list(self,", "that means the ref'd fixture type was not found fixture_type", "domain, child_type, parent_ref_name, parent_fdi.fields_without_attributes[references]) ) elif type_id or type_tag: type_id", "type_id or type_tag: type_id = type_id or FixtureDataType.by_domain_tag( domain, type_tag).one()", "type_id)) else: fdis = list(FixtureDataItem.by_domain(domain)) return [convert_fdt(fdi) for fdi in", "references: parent_fdi = FixtureDataItem.get(parent_id) fdis = list( FixtureDataItem.by_field_value( domain, child_type,", "return convert_fdt(get_object_or_not_exist( FixtureDataItem, kwargs['pk'], kwargs['domain'])) def obj_get_list(self, bundle, **kwargs): domain", "readonly=True, unique=True) # when null, that means the ref'd fixture", "from corehq.apps.api.util import get_object_or_not_exist from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType from", "from corehq.apps.api.resources.v0_1 import ( CustomResourceMeta, RequirePermissionAuthentication, ) from corehq.apps.api.util import", "not found fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True, null=True) id = tp_f.CharField(attribute='_id',", "obj_get_list(self, bundle, **kwargs): domain = kwargs['domain'] parent_id = bundle.request.GET.get(\"parent_id\", None)", 
"tp_f.CharField(attribute='fixture_type', readonly=True, null=True) id = tp_f.CharField(attribute='_id', readonly=True, unique=True) def obj_get(self,", "**kwargs): domain = kwargs['domain'] parent_id = bundle.request.GET.get(\"parent_id\", None) parent_ref_name =", "Meta(CustomResourceMeta): authentication = RequirePermissionAuthentication(Permissions.edit_apps) object_class = FixtureDataItem resource_name = 'fixture'", "and parent_ref_name and child_type and references: parent_fdi = FixtureDataItem.get(parent_id) fdis", "fields as tp_f from corehq.apps.api.resources import JsonResource from corehq.apps.api.resources.v0_1 import", "unique=True) # when null, that means the ref'd fixture type", "= bundle.request.GET.get(\"child_type\", None) type_id = bundle.request.GET.get(\"fixture_type_id\", None) type_tag = bundle.request.GET.get(\"fixture_type\",", "fdi class FixtureResource(JsonResource): type = \"fixture\" fields = tp_f.DictField(attribute='try_fields_without_attributes', readonly=True,", "fdi except ResourceNotFound: return fdi class FixtureResource(JsonResource): type = \"fixture\"", "readonly=True, unique=True) def obj_get(self, bundle, **kwargs): return convert_fdt(get_object_or_not_exist( FixtureDataItem, kwargs['pk'],", "when null, that means the ref'd fixture type was not", "CustomResourceMeta, RequirePermissionAuthentication, ) from corehq.apps.api.util import get_object_or_not_exist from corehq.apps.fixtures.models import", "type_id or FixtureDataType.by_domain_tag( domain, type_tag).one() fdis = list(FixtureDataItem.by_data_type(domain, type_id)) else:", "None) if parent_id and parent_ref_name and child_type and references: parent_fdi", "[] class Meta(CustomResourceMeta): authentication = RequirePermissionAuthentication(Permissions.edit_apps) object_class = FixtureDataItem resource_name", "corehq.apps.api.resources.v0_1 import ( CustomResourceMeta, RequirePermissionAuthentication, ) from corehq.apps.api.util import get_object_or_not_exist", "import 
ResourceNotFound from tastypie import fields as tp_f from corehq.apps.api.resources", "RequirePermissionAuthentication, ) from corehq.apps.api.util import get_object_or_not_exist from corehq.apps.fixtures.models import FixtureDataItem,", "= fdt.tag return fdi except ResourceNotFound: return fdi class FixtureResource(JsonResource):", "couchdbkit import ResourceNotFound from tastypie import fields as tp_f from", "FixtureDataType from corehq.apps.users.models import Permissions def convert_fdt(fdi): try: fdt =", "kwargs['domain'])) def obj_get_list(self, bundle, **kwargs): domain = kwargs['domain'] parent_id =", "bundle.request.GET.get(\"references\", None) child_type = bundle.request.GET.get(\"child_type\", None) type_id = bundle.request.GET.get(\"fixture_type_id\", None)", "bundle.request.GET.get(\"parent_id\", None) parent_ref_name = bundle.request.GET.get(\"parent_ref_name\", None) references = bundle.request.GET.get(\"references\", None)", "bundle.request.GET.get(\"child_type\", None) type_id = bundle.request.GET.get(\"fixture_type_id\", None) type_tag = bundle.request.GET.get(\"fixture_type\", None)", "= RequirePermissionAuthentication(Permissions.edit_apps) object_class = FixtureDataItem resource_name = 'fixture' limit =", "def obj_get_list(self, bundle, **kwargs): domain = kwargs['domain'] parent_id = bundle.request.GET.get(\"parent_id\",", "[convert_fdt(fdi) for fdi in fdis] or [] class Meta(CustomResourceMeta): authentication" ]
[ "self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(responses.calls[1].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def", "status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.get, domain_id) class TestCreate(TestDomain): \"\"\"Test", "all the query information self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response[0]) @responses.activate def", "responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client) data = domain.find()", "is less than the page size self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.url, self.api_url)", "\"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url, headers={\"Location\": \"not", "@responses.activate def test_forced(self): \"\"\"The function should return all the data,", ".lib.testbase import ClientFixture class TestDomain(TestCase): # pylint: disable=too-few-public-methods \"\"\"Serve as", "self.assertRaises(TypeError, domain.get) @responses.activate def test_domain_id(self): \"\"\"The function should return data", "normally self.valid_response = [ {\"id\": 1234, \"name\": \"example.com\"}, {\"id\": 4321,", "= f\"{self.api_url}?name=example.com\" domain = Domain(client=self.client) data = domain.find(name=\"example.com\") # Verify", "self.assertRaises(TypeError, domain.delete) @responses.activate def test_delete_success(self): \"\"\"The function should return True", "Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class", "as the number of # entries returned is less than", "\"\"\" The function should return the created domain ID when", "**create_args) class TestDelete(TestDomain): \"\"\"Test the .delete method.\"\"\" @responses.activate 
def test_need_params(self):", "the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200) api_url = f\"{self.api_url}?name=example.com\"", "to API\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response[0],", "Domain(client=self.client) self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types) class TestRemoveDelegation(TestDomain): \"\"\"Test the", "types = [\"SSL\"] location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\": location},", "TestActivate(TestDomain): \"\"\"Test the .activate method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The", "= 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup the mocked response", "well as add all parameters to the request body \"\"\"", "status=200) domain = Domain(client=self.client) data = domain.all() data = domain.all()", "self.assertRaises(TypeError, domain.activate) @responses.activate def test_activate_success(self): \"\"\"The function should return True", "the created domain ID when additional params are specified, as", "data = domain.all() data = domain.all() # Verify all the", "domain = Domain(client=self.client) data = domain.find() self.assertEqual(data, self.valid_response) @responses.activate def", "f\"{self.cfixt.base_url}/domain/{version}\" # Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_response, status=200)", "the mocked response responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response", "the inherited setUp method super().setUp() # Make sure the Client", "\"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) class TestDelete(TestDomain): \"\"\"Test the", "@responses.activate def test_reject_failure_http_error(self): \"\"\"The function should raise an HTTPError exception", 
"test_suspend_failure_http_error(self): \"\"\" The function should raise an HTTPError exception if", "function should raise an HTTPError exception if counts cannot be", "True if the delegation removal succeeded.\"\"\" domain_id = 1234 org_id", "TestApproveDelegation(TestDomain): \"\"\"Test the .approve_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The", "= domain.suspend(domain_id) self.assertEqual(True, response) @responses.activate def test_suspend_failure_http_error(self): \"\"\" The function", "= f\"{self.cfixt.base_url}/domain/v1\" # Setup a test response one would expect", "json=self.valid_response, status=200) domain = Domain(client=self.client) data = domain.all() data =", "twice.\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)", "\"\"\"The function should return True if the delegation succeeded.\"\"\" domain_id", "only guaranteed as long as the number of # entries", "code and description if the Domain creation failed. \"\"\" #", "function should return data about the specified Domain ID.\"\"\" domain_id", "passed to API\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url,", "test_bad_http(self): \"\"\"The function should raise an HTTPError exception if domains", "the mocked response responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200) domain = Domain(client=self.client)", "def test_create_success(self): \"\"\" The function should return the created domain", "parameters. 
\"\"\" domain = Domain(client=self.client) # Not going to check", "Domain(client=self.client) self.assertRaises(TypeError, domain.get) @responses.activate def test_domain_id(self): \"\"\"The function should return", "post_data = { \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": org_id, \"certTypes\": types}]", "test_activate_success(self): \"\"\"The function should return True if the activation succeeded.\"\"\"", "parameters. \"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delete)", "return in an error self.error_response = {\"description\": \"domain error\"} class", "domain = Domain(client=self.client) post_data = { \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\":", "json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_delegate_failure_http_error(self): \"\"\"The function should raise an HTTPError", "should raise an HTTPError exception if the approval failed.\"\"\" domain_id", "org_id, types) post_data = { \"orgId\": org_id, \"certTypes\": types }", "1) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_forced(self): \"\"\"The function", "def test_remove_delegation_failure_http_error(self): \"\"\"The function should raise an HTTPError exception if", "{\"id\": 1234, \"name\": \"example.com\"}, {\"id\": 4321, \"name\": \"*.example.com\"}, {\"id\": 4322,", "would expect normally self.valid_response = [ {\"id\": 1234, \"name\": \"example.com\"},", "response = domain.create(\"sub2.example.com\", 4321, [\"SSL\"], description=\"Example sub domain\") self.assertEqual(response, {\"id\":", "test_create_failure_http_error(self): \"\"\" The function should return an error code and", "Setup the mocked response responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain =", "\"\"\" The function should return an error code and description", "True if the activation 
succeeded.\"\"\" domain_id = 1234 api_url =", "test_approval_failure_http_error(self): \"\"\"The function should raise an HTTPError exception if the", "status=200) domain = Domain(client=self.client) data = domain.count() self.assertEqual(data, count) self.assertEqual(responses.calls[0].request.url,", "of unit testing # pylint: disable=protected-access # pylint: disable=no-member import", "without required parameters. \"\"\" domain = Domain(client=self.client) # Not going", "self.assertRaises(DomainCreationResponseError, domain.create, **create_args) class TestDelete(TestDomain): \"\"\"Test the .delete method.\"\"\" @responses.activate", "the .all method.\"\"\" @responses.activate def test_cached(self): \"\"\"The function should return", "method.\"\"\" @responses.activate def test_cached(self): \"\"\"The function should return all the", "= domain.find(name=\"example.com\") # Verify all the query information self.assertEqual(responses.calls[0].request.url, api_url)", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.count) # Verify all the query", "function should return all the data, but should query the", "types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_remove_delegation_failure_http_error(self): \"\"\"The", "all the data, but should not query the API twice.\"\"\"", "responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.all) #", "return True if the rejection succeeded.\"\"\" domain_id = 1234 org_id", "org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup the mocked", "TestGet(TestDomain): \"\"\"Test the .get method.\"\"\" @responses.activate def test_need_domain_id(self): \"\"\"The function", "test_no_params(self): \"\"\"Without parameters, the method will return all domains\"\"\" #", "Setup the 
mocked response responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client)", "# missing domain_id self.assertRaises(TypeError, domain.delegate) @responses.activate def test_delegate_success(self): \"\"\"The function", "the created domain ID, as well as add all parameters", "# but verify that something is required self.assertRaises(TypeError, domain.create) @responses.activate", "# Setup a test response one would expect normally self.valid_response", "mocked response count = {\"count\": len(self.valid_response[0])} api_url = f\"{self.api_url}/count\" responses.add(responses.GET,", "Verify all the query information self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response[0]) @responses.activate", ".get method.\"\"\" @responses.activate def test_need_domain_id(self): \"\"\"The function should raise an", "the mocked response responses.add(responses.GET, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError,", "exception if the delegation removal failed.\"\"\" domain_id = 1234 org_id", "domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_success_optional_params(self): \"\"\" The function should", "Make sure the Client fixture is created and setup self.cfixt", "\"Example sub domain\" } response = domain.create(\"sub2.example.com\", 4321, [\"SSL\"], description=\"Example", "if api_version is passed as a parameter.\"\"\" # Set a", "should return True if the deletion succeeded.\"\"\" domain_id = 1234", "# Setup the mocked response responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain", "\"v3\" api_url = f\"{self.cfixt.base_url}/domain/{version}\" # Setup the mocked response responses.add(responses.GET,", "[{\"orgId\": org_id, \"certTypes\": types}] } response = domain.create(\"sub2.example.com\", org_id, types)", "parameters, # but verify that something is 
required self.assertRaises(TypeError, domain.create)", "self.assertRaises(TypeError, domain.reject_delegation) @responses.activate def test_reject_delegation_success(self): \"\"\"The function should return True", "= Domain(client=self.client, api_version=version) data = domain.all() # Verify all the", "@responses.activate def test_remove_delegation_success(self): \"\"\"The function should return True if the", "return all the data, but should query the API twice.\"\"\"", "= f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.DELETE, api_url, status=404)", "API twice.\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response,", "domain.count) # Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url,", "def test_param(self): \"\"\"The URL should change if api_version is passed", "the data, but should not query the API twice.\"\"\" #", "Setup a test response one would expect normally self.valid_response =", "function should raise an HTTPError exception if the deletion failed.", "Domain(client=self.client) response = domain.remove_delegation(domain_id, org_id, types) post_data = { \"orgId\":", "the suspension failed. 
\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\"", "the .delete method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should", "setUp method super().setUp() # Make sure the Client fixture is", "# Set a new version version = \"v3\" api_url =", "the mocked response responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError,", "self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestCount(TestDomain): \"\"\"Test the .count method.\"\"\"", "status=200) domain = Domain(client=self.client) data = domain.get(domain_id) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url,", "the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response) def", "are specified, as well add the non-required parameters to the", "test_domain_id(self): \"\"\"The function should return data about the specified Domain", "long as the number of # entries returned is less", "domain.remove_delegation(domain_id, org_id, types) post_data = { \"orgId\": org_id, \"certTypes\": types", ".approve_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should raise", "description if the Domain creation failed with DomainCreationResponseError (Domain ID", "initializer.\"\"\" @responses.activate def test_param(self): \"\"\"The URL should change if api_version", "domain.delegate(domain_id, org_id, types) post_data = { \"orgId\": org_id, \"certTypes\": types", "[\"other\"] } self.assertRaises(ValueError, domain.create, **create_args) @responses.activate def test_create_failure_http_status_unexpected(self): \"\"\" The", "api_version is passed as a parameter.\"\"\" # Set a new", "self.assertRaises(DomainCreationResponseError, domain.create, **create_args) 
@responses.activate def test_create_failure_domain_id_not_found(self): \"\"\" The function should", "response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client) data =", "\"\"\"The function should return all the data, but should not", "retrieved from the API.\"\"\" # Setup the mocked response api_url", "self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response) def test_need_client(self): \"\"\"The class should raise", "not found in response). \"\"\" # Setup the mocked response", "the deletion failed. \"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\"", "\"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) class", "should return True if the activation succeeded.\"\"\" domain_id = 1234", "import TestCase import responses from cert_manager.domain import Domain, DomainCreationResponseError from", "# Don't warn about things that happen as that is", "location}, status=201) domain = Domain(client=self.client) post_data = { \"name\": \"sub2.example.com\",", "creation failed with DomainCreationResponseError (unexpected HTTP status code). 
\"\"\" #", "self.api_url = f\"{self.cfixt.base_url}/domain/v1\" # Setup a test response one would", "f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the mocked response responses.add(responses.PUT, api_url, status=404) domain", "an HTTPError exception if the delegation failed.\"\"\" domain_id = 1234", "api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id) class", "\"org_id\": 4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) class TestDelete(TestDomain):", "api_url = f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup the mocked response responses.add(responses.POST, api_url,", "4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_missing_location_header(self):", "\"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_domain_id_not_found(self): \"\"\"", "domain_id = 1234 location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\": location},", "Domain(client=self.client) data = domain.all() data = domain.all(force=True) # Verify all", "= 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup the mocked response", "should raise an HTTPError exception if counts cannot be retrieved", "4322, \"name\": \"subdomain.example.com\"}, ] # Setup a test response for", "self.assertEqual(data, self.valid_individual_response) @responses.activate def test_ne_domain_id(self): \"\"\"The function should raise an", "} self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_remove_delegation_failure_http_error(self): \"\"\"The function", "\"\"\"Without parameters, the 
method will count all domains\"\"\" # Setup", "the Domain creation failed with DomainCreationResponseError (unexpected HTTP status code).", "first time \"all\" is called. # Due to pagination, this", "without an domain_id parameter.\"\"\" domain = Domain(client=self.client) self.assertRaises(TypeError, domain.get) @responses.activate", "= 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the mocked response", "return the created domain ID when additional params are specified,", "response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delegate, domain_id,", "domain_id self.assertRaises(TypeError, domain.delegate) @responses.activate def test_delegate_success(self): \"\"\"The function should return", "mocked response responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response =", "1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup the", "will return all domains\"\"\" # Setup the mocked response responses.add(responses.GET,", "\"\"\"Test the .get method.\"\"\" @responses.activate def test_need_domain_id(self): \"\"\"The function should", "= Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delegate) @responses.activate def test_delegate_success(self):", "function should raise an exception without an domain_id parameter.\"\"\" domain", "[\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) class TestDelete(TestDomain): \"\"\"Test the .delete", "retrieved from the API.\"\"\" # Setup the mocked response responses.add(responses.GET,", "create_args = { \"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"other\"] }", "domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delegate) @responses.activate def", "response for getting a specific Domain self.valid_individual_response = 
self.valid_response[0] self.valid_individual_response[\"status\"]", "= domain.all() data = domain.all() # Verify all the query", "happen as that is part of unit testing # pylint:", "= 1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup", "# Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200) domain", "data = domain.find(name=\"example.com\") # Verify all the query information self.assertEqual(responses.calls[0].request.url,", "f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup the mocked response responses.add(responses.POST, api_url, status=404) domain", "import json from requests.exceptions import HTTPError from testtools import TestCase", "responses from cert_manager.domain import Domain, DomainCreationResponseError from .lib.testbase import ClientFixture", "the page size self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate", "Domain(client=self.client) response = domain.approve_delegation(domain_id, org_id) post_data = { \"orgId\": org_id,", "@responses.activate def test_bad_http(self): \"\"\"The function should raise an HTTPError exception", "self.assertEqual(responses.calls[0].request.url, api_url) @responses.activate def test_params(self): \"\"\"Parameters will be passed to", ".create method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should raise", "self.cfixt = self.useFixture(ClientFixture()) self.client = self.cfixt.client self.api_url = f\"{self.cfixt.base_url}/domain/v1\" #", "{ \"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create,", "response = domain.approve_delegation(domain_id, org_id) post_data = { \"orgId\": org_id, }", "# entries returned is less than the page size 
self.assertEqual(len(responses.calls),", "self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_reject_failure_http_error(self): \"\"\"The function should", "\"\"\"The function should raise an HTTPError exception if domains cannot", "test_delegate_failure_http_error(self): \"\"\"The function should raise an HTTPError exception if the", "# pylint: disable=protected-access # pylint: disable=no-member import json from requests.exceptions", "an HTTPError exception if the rejection failed.\"\"\" domain_id = 1234", "utf-8 -*- \"\"\"Define the cert_manager.domain.Domain unit tests.\"\"\" # Don't warn", "response = domain.delete(domain_id) self.assertEqual(True, response) @responses.activate def test_delete_failure_http_error(self): \"\"\" The", "api_version=version) data = domain.all() # Verify all the query information", "self.assertRaises(HTTPError, domain.find) # Verify all the query information self.assertEqual(len(responses.calls), 1)", "self.valid_response) @responses.activate def test_params(self): \"\"\"Parameters will be passed to API\"\"\"", "api_url = f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.DELETE, api_url,", "self.assertEqual(data, self.valid_response[0]) @responses.activate def test_bad_http(self): \"\"\"The function should raise an", "= {\"count\": len(self.valid_response[0])} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count, status=200)", "parameters. 
\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delegate)", "all the query information # There should only be one", "json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_failure_http_error(self): \"\"\" The function should return an", "= { \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": 4321, \"certTypes\": [\"SSL\"]}], \"description\":", "mocked response responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200) api_url = f\"{self.api_url}?name=example.com\" domain", "mocked response api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=self.error_response, status=400) domain", "specified Domain ID.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" #", "will count all domains\"\"\" # Setup the mocked response count", "response domain_id = 1234 org_id = 4321 types = [\"SSL\"]", "responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response = domain.delegate(domain_id, org_id,", "Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.remove_delegation) @responses.activate def test_remove_delegation_success(self): \"\"\"The", "if the suspension failed. 
\"\"\" domain_id = 1234 api_url =", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.delete, domain_id) class TestActivate(TestDomain): \"\"\"Test the .activate", "api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.count) # Verify", "from requests.exceptions import HTTPError from testtools import TestCase import responses", "# missing domain_id self.assertRaises(TypeError, domain.delete) @responses.activate def test_delete_success(self): \"\"\"The function", "the Domain class.\"\"\" def setUp(self): # pylint: disable=invalid-name \"\"\"Initialize the", "domain.count(name=\"example.com\") # Verify all the query information self.assertEqual(responses.calls[0].request.url, f\"{api_url}?name=example.com\") self.assertEqual(data,", "HTTP status code). \"\"\" # Setup the mocked response responses.add(responses.POST,", "domain = Domain(client=self.client) data = domain.all() data = domain.all(force=True) #", "sure the Client fixture is created and setup self.cfixt =", "failed. \"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup", "if domains cannot be retrieved from the API.\"\"\" # Setup", "self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_failure_http_error(self): \"\"\" The", "domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_failure_http_error(self): \"\"\" The function should", "the deletion failed. 
\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\"", "domain.delegate, domain_id, org_id, types) class TestRemoveDelegation(TestDomain): \"\"\"Test the .remove_delegation method.\"\"\"", "\"\"\"Test the .delete method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function", "org_id, \"certTypes\": types}] } response = domain.create(\"sub2.example.com\", org_id, types) self.assertEqual(response,", "responses.add(responses.POST, self.api_url, json=self.error_response, status=200) domain = Domain(client=self.client) create_args = {", "response api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=self.error_response, status=400) domain =", "self.valid_response) def test_need_client(self): \"\"\"The class should raise an exception without", "self.assertEqual(data, self.valid_response) def test_need_client(self): \"\"\"The class should raise an exception", "= Domain(client=self.client) post_data = { \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": 4321,", "= 4321 types = [\"SSL\"] location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url,", "types) post_data = { \"orgId\": org_id, \"certTypes\": types } self.assertEqual(True,", "{\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_success_optional_params(self): \"\"\" The function", "status=200) domain = Domain(client=self.client) response = domain.delegate(domain_id, org_id, types) post_data", "response responses.add(responses.POST, self.api_url, status=201) domain = Domain(client=self.client) create_args = {", "\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.reject_delegation) @responses.activate", "The function should return the created domain ID when additional", "# There should only be one call the first time", ".find method.\"\"\" @responses.activate def 
test_no_params(self): \"\"\"Without parameters, the method will", "should not query the API twice.\"\"\" # Setup the mocked", "= f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.POST, api_url, status=404)", "pylint: disable=too-few-public-methods \"\"\"Serve as a Base class for all tests", "} response = domain.create(\"sub2.example.com\", org_id, types) self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body,", "self.api_url, json=self.error_response, status=200) domain = Domain(client=self.client) create_args = { \"name\":", "# Setup the mocked response count = {\"count\": len(self.valid_response[0])} api_url", "domain.delete) @responses.activate def test_delete_success(self): \"\"\"The function should return True if", "Set a new version version = \"v3\" api_url = f\"{self.cfixt.base_url}/domain/{version}\"", "= f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup the mocked response responses.add(responses.POST, api_url, status=404)", "domain.create, **create_args) @responses.activate def test_create_failure_http_status_unexpected(self): \"\"\" The function should return", "responses.add(responses.DELETE, api_url, status=200) domain = Domain(client=self.client) response = domain.delete(domain_id) self.assertEqual(True,", "created domain ID, as well as add all parameters to", "from testtools import TestCase import responses from cert_manager.domain import Domain,", "domain = Domain(client=self.client) response = domain.reject_delegation(domain_id, org_id) post_data = {", "f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.count)", "api_url, status=200) domain = Domain(client=self.client) response = domain.activate(domain_id) self.assertEqual(True, response)", "f\"{self.cfixt.base_url}/domain/v1\" # Setup a test response one would expect 
normally", "domain_id self.assertRaises(TypeError, domain.remove_delegation) @responses.activate def test_remove_delegation_success(self): \"\"\"The function should return", "Domain(client=self.client) # Not going to check every permutation of missing", "HTTPError exception if the rejection failed.\"\"\" domain_id = 1234 org_id", "Setup the mocked response responses.add(responses.DELETE, api_url, status=200) domain = Domain(client=self.client)", "the query information self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response[0]) @responses.activate def test_bad_http(self):", "# Call the inherited setUp method super().setUp() # Make sure", "domain_id, org_id, types) class TestRemoveDelegation(TestDomain): \"\"\"Test the .remove_delegation method.\"\"\" @responses.activate", "\"\"\"The function should return all the data, but should query", "response domain_id = 1234 location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\":", "f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200)", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types) class TestApproveDelegation(TestDomain):", "exception if the approval failed.\"\"\" domain_id = 1234 org_id =", "api_url, json=self.valid_response, status=200) domain = Domain(client=self.client, api_version=version) data = domain.all()", "= domain.approve_delegation(domain_id, org_id) post_data = { \"orgId\": org_id, } self.assertEqual(True,", "raise an HTTPError exception if the deletion failed. 
\"\"\" domain_id", "should raise an HTTPError exception if domains cannot be retrieved", "self.error_response = {\"description\": \"domain error\"} class TestInit(TestDomain): \"\"\"Test the class", "for getting a specific Domain self.valid_individual_response = self.valid_response[0] self.valid_individual_response[\"status\"] =", "function should return the created domain ID when additional params", "= f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup the mocked response responses.add(responses.POST, api_url, status=200)", "# missing domain_id self.assertRaises(TypeError, domain.remove_delegation) @responses.activate def test_remove_delegation_success(self): \"\"\"The function", "= domain.all() # Verify all the query information self.assertEqual(len(responses.calls), 1)", "def test_need_client(self): \"\"\"The class should raise an exception without a", "should query the API twice.\"\"\" # Setup the mocked response", "failed with DomainCreationResponseError (Domain ID not found in response). 
\"\"\"", "mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) create_args", "Setup the mocked response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client)", "api_url, status=200) domain = Domain(client=self.client) response = domain.reject_delegation(domain_id, org_id) post_data", "self.api_url, status=201) domain = Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\",", "function should return True if the activation succeeded.\"\"\" domain_id =", "4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup the mocked response responses.add(responses.POST,", "self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(responses.calls[1].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_bad_http(self): \"\"\"The", "response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_reject_failure_http_error(self): \"\"\"The function should raise", "less than the page size self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(responses.calls[1].request.url,", "Domain ID.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup", "parameter.\"\"\" domain = Domain(client=self.client) self.assertRaises(TypeError, domain.get) @responses.activate def test_domain_id(self): \"\"\"The", "a test response for getting a specific Domain self.valid_individual_response =", "\"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": 4321, \"certTypes\": [\"SSL\"]}], \"description\": \"Example sub", "= [\"SSL\"] location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\": location}, status=201)", "approval failed.\"\"\" domain_id = 1234 org_id = 4321 api_url 
=", "response) @responses.activate def test_activate_failure_http_error(self): \"\"\" The function should raise an", "response responses.add(responses.DELETE, api_url, status=200) domain = Domain(client=self.client) response = domain.delete(domain_id)", "Location header in response). \"\"\" # Setup the mocked response", ".delete method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should raise", "types) class TestRemoveDelegation(TestDomain): \"\"\"Test the .remove_delegation method.\"\"\" @responses.activate def test_need_params(self):", "the approval failed.\"\"\" domain_id = 1234 org_id = 4321 api_url", "mocked response responses.add(responses.POST, self.api_url, headers={\"Location\": \"not a url\"}, status=201) domain", "json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_approval_failure_http_error(self): \"\"\"The function should raise an HTTPError", "exception if the delegation failed.\"\"\" domain_id = 1234 org_id =", "api_url, status=200) domain = Domain(client=self.client) response = domain.delegate(domain_id, org_id, types)", "\"org_id\": 4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def", "\"certTypes\": types}] } response = domain.create(\"sub2.example.com\", org_id, types) self.assertEqual(response, {\"id\":", "response responses.add(responses.POST, self.api_url, headers={\"Location\": \"not a url\"}, status=201) domain =", "\"Active\" # Setup JSON to return in an error self.error_response", "Verify all the query information self.assertEqual(responses.calls[0].request.url, f\"{api_url}?name=example.com\") self.assertEqual(data, count) @responses.activate", "@responses.activate def test_delegate_failure_http_error(self): \"\"\"The function should raise an HTTPError exception", "{ \"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"other\"] } self.assertRaises(ValueError, domain.create,", "[\"SSL\"] } 
self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_domain_id_not_found(self): \"\"\" The", "\"\"\"The function should return True if the suspension succeeded.\"\"\" domain_id", "method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should raise an", "api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types)", "\"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args)", "status=400) domain = Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\", \"org_id\":", "things that happen as that is part of unit testing", "as that is part of unit testing # pylint: disable=protected-access", "domain.find() self.assertEqual(data, self.valid_response) @responses.activate def test_params(self): \"\"\"Parameters will be passed", "Domain(client=self.client) self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types) class TestApproveDelegation(TestDomain): \"\"\"Test the", "The function should raise an HTTPError exception if the deletion", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.find) # Verify all the query information", "= domain.delegate(domain_id, org_id, types) post_data = { \"orgId\": org_id, \"certTypes\":", "# Setup the mocked response responses.add(responses.POST, api_url, status=404) domain =", "an exception without a client parameter.\"\"\" self.assertRaises(TypeError, Domain) class TestAll(TestDomain):", "status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delete, domain_id) class TestActivate(TestDomain): \"\"\"Test", "\"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"other\"] } self.assertRaises(ValueError, domain.create, **create_args) @responses.activate", "HTTPError exception if counts cannot be 
retrieved from the API.\"\"\"", "self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_success_optional_params(self): \"\"\" The", "method will return all domains\"\"\" # Setup the mocked response", "of the Domain class.\"\"\" def setUp(self): # pylint: disable=invalid-name \"\"\"Initialize", "Domain self.valid_individual_response = self.valid_response[0] self.valid_individual_response[\"status\"] = \"Active\" # Setup JSON", "tests.\"\"\" # Don't warn about things that happen as that", "should raise an exception when called without required parameters. \"\"\"", "responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response = domain.approve_delegation(domain_id, org_id)", "api_url, json=count, status=200) domain = Domain(client=self.client) data = domain.count(name=\"example.com\") #", "code and description if the Domain creation failed with DomainCreationResponseError", "suspension failed. 
\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" #", "data = domain.all(force=True) # Verify all the query information #", "from cert_manager.domain import Domain, DomainCreationResponseError from .lib.testbase import ClientFixture class", "def test_no_params(self): \"\"\"Without parameters, the method will count all domains\"\"\"", "Domain(client=self.client) response = domain.suspend(domain_id) self.assertEqual(True, response) @responses.activate def test_suspend_failure_http_error(self): \"\"\"", "= f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.GET, api_url, status=404)", "api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delete, domain_id) class TestActivate(TestDomain):", "inherited setUp method super().setUp() # Make sure the Client fixture", "self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.find) # Verify", "self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_forced(self): \"\"\"The", "test_create_failure_domain_id_not_found(self): \"\"\" The function should return an error code and", "domain.all) # Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url,", "There should only be one call the first time \"all\"", "return an error code and description if the Domain creation", "parameters, the method will count all domains\"\"\" # Setup the", "def test_delete_failure_http_error(self): \"\"\" The function should raise an HTTPError exception", "= f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup the mocked response responses.add(responses.POST, api_url, status=200)", "class.\"\"\" # Call the inherited setUp method super().setUp() # Make", "description if the Domain 
creation failed with DomainCreationResponseError (unexpected HTTP", "def test_need_params(self): \"\"\" The function should raise an exception when", "Setup the mocked response responses.add(responses.POST, self.api_url, status=201) domain = Domain(client=self.client)", "def test_cached(self): \"\"\"The function should return all the data, but", "\"certTypes\": types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_delegate_failure_http_error(self):", "return True if the delegation succeeded.\"\"\" domain_id = 1234 org_id", "the rejection failed.\"\"\" domain_id = 1234 org_id = 4321 api_url", "json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_remove_delegation_failure_http_error(self): \"\"\"The function should raise an HTTPError", "number of # entries returned is less than the page", "Domain creation failed with DomainCreationResponseError (unexpected HTTP status code). 
\"\"\"", "should return all the data, but should query the API", "\"\"\"Without parameters, the method will return all domains\"\"\" # Setup", "all domains\"\"\" # Setup the mocked response count = {\"count\":", "raise an HTTPError exception if the delegation failed.\"\"\" domain_id =", "JSON to return in an error self.error_response = {\"description\": \"domain", "page size self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(responses.calls[1].request.url, self.api_url) self.assertEqual(data, self.valid_response)", "self.assertRaises(TypeError, domain.remove_delegation) @responses.activate def test_remove_delegation_success(self): \"\"\"The function should return True", "api_url, status=200) domain = Domain(client=self.client) response = domain.remove_delegation(domain_id, org_id, types)", "activation succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\" # Setup", "should raise an exception without an domain_id parameter.\"\"\" domain =", "headers={\"Location\": \"not a url\"}, status=201) domain = Domain(client=self.client) create_args =", "Domain(client=self.client) self.assertRaises(HTTPError, domain.find) # Verify all the query information self.assertEqual(len(responses.calls),", "the mocked response responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client)", "for all tests of the Domain class.\"\"\" def setUp(self): #", "domain_id) class TestSuspend(TestDomain): \"\"\"Test the .suspend method.\"\"\" @responses.activate def test_need_params(self):", "the mocked response count = {\"count\": len(self.valid_response[0])} api_url = f\"{self.api_url}/count\"", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.all) # Verify all the query", "the query information self.assertEqual(responses.calls[0].request.url, f\"{api_url}?name=example.com\") self.assertEqual(data, count) 
@responses.activate def test_bad_http(self):", "response count = {\"count\": len(self.valid_response)} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url,", "the method will count all domains\"\"\" # Setup the mocked", "= domain.find() self.assertEqual(data, self.valid_response) @responses.activate def test_params(self): \"\"\"Parameters will be", "HTTPError exception if the specified Domain ID does not exist.\"\"\"", "= 1234 org_id = 4321 types = [\"SSL\"] api_url =", "# missing domain_id self.assertRaises(TypeError, domain.approve_delegation) @responses.activate def test_approve_delegation_success(self): \"\"\"The function", "json from requests.exceptions import HTTPError from testtools import TestCase import", "TestRemoveDelegation(TestDomain): \"\"\"Test the .remove_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The", "function should raise an exception when called without required parameters.", "Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delete) @responses.activate def test_delete_success(self): \"\"\"The", "TestInit(TestDomain): \"\"\"Test the class initializer.\"\"\" @responses.activate def test_param(self): \"\"\"The URL", "return True if the suspension succeeded.\"\"\" domain_id = 1234 api_url", "json=self.valid_response, status=200) domain = Domain(client=self.client) data = domain.find() self.assertEqual(data, self.valid_response)", "4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) class TestDelete(TestDomain): \"\"\"Test", "the API.\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.error_response,", "expect normally self.valid_response = [ {\"id\": 1234, \"name\": \"example.com\"}, {\"id\":", "2) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(responses.calls[1].request.url, self.api_url) self.assertEqual(data, self.valid_response) 
@responses.activate def test_bad_http(self):", "\"\"\" The function should raise an HTTPError exception if the", "mocked response domain_id = 1234 location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url,", "raise an HTTPError exception if the specified Domain ID does", "parameters. \"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.approve_delegation)", "an HTTPError exception if the approval failed.\"\"\" domain_id = 1234", "when additional params are specified, as well add the non-required", "domain = Domain(client=self.client, api_version=version) data = domain.all() # Verify all", "= 1234 location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\": location}, status=201)", "function should return the created domain ID, as well as", "the request body \"\"\" # Setup the mocked response domain_id", "to return in an error self.error_response = {\"description\": \"domain error\"}", "class TestRemoveDelegation(TestDomain): \"\"\"Test the .remove_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\"", "should raise an HTTPError exception if the delegation failed.\"\"\" domain_id", "1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.DELETE,", "deletion succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup", "\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.activate) @responses.activate", "{ \"orgId\": org_id, \"certTypes\": types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\"))", "page size self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def", "\"name\": \"sub2.example.com\", 
\"delegations\": [{\"orgId\": org_id, \"certTypes\": types}] } response =", "class TestAll(TestDomain): \"\"\"Test the .all method.\"\"\" @responses.activate def test_cached(self): \"\"\"The", "self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestFind(TestDomain): \"\"\"Test the .find method.\"\"\"", "test_create_success(self): \"\"\" The function should return the created domain ID,", "status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id) class TestRejectDelegation(TestDomain):", "api_url) @responses.activate def test_params(self): \"\"\"Parameters will be passed to API\"\"\"", "requests.exceptions import HTTPError from testtools import TestCase import responses from", "test_ne_domain_id(self): \"\"\"The function should raise an HTTPError exception if the", "domain = Domain(client=self.client) data = domain.find(name=\"example.com\") # Verify all the", "[\"SSL\"] api_url = f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.DELETE,", "passed as a parameter.\"\"\" # Set a new version version", "responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delete, domain_id) class", "@responses.activate def test_approval_failure_http_error(self): \"\"\"The function should raise an HTTPError exception", "with DomainCreationResponseError (no Location header in response). 
\"\"\" # Setup", "an HTTPError exception if the delegation removal failed.\"\"\" domain_id =", "@responses.activate def test_need_params(self): \"\"\" The function should raise an exception", "but should query the API twice.\"\"\" # Setup the mocked", "= Domain(client=self.client) response = domain.delegate(domain_id, org_id, types) post_data = {", "post_data = { \"orgId\": org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\"))", "the .reject_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should", "if the rejection succeeded.\"\"\" domain_id = 1234 org_id = 4321", "an HTTPError exception if domains cannot be retrieved from the", "\"name\": \"*.example.com\"}, {\"id\": 4322, \"name\": \"subdomain.example.com\"}, ] # Setup a", "an HTTPError exception if the suspension failed. \"\"\" domain_id =", "test_activate_failure_http_error(self): \"\"\" The function should raise an HTTPError exception if", "super().setUp() # Make sure the Client fixture is created and", "org_id = 4321 types = [\"SSL\"] location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST,", "# -*- coding: utf-8 -*- \"\"\"Define the cert_manager.domain.Domain unit tests.\"\"\"", "\"example.com\"}, {\"id\": 4321, \"name\": \"*.example.com\"}, {\"id\": 4322, \"name\": \"subdomain.example.com\"}, ]", "= Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.activate) @responses.activate def test_activate_success(self):", "domain_id = 2345 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked", "query information self.assertEqual(responses.calls[0].request.url, f\"{api_url}?name=example.com\") self.assertEqual(data, count) @responses.activate def test_bad_http(self): \"\"\"The", "class TestGet(TestDomain): \"\"\"Test the .get method.\"\"\" @responses.activate def test_need_domain_id(self): \"\"\"The", "response 
responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response = domain.approve_delegation(domain_id,", "check every permutation of missing parameters, # but verify that", "domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the mocked", "\"\"\" # Setup the mocked response domain_id = 1234 location", "json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_reject_failure_http_error(self): \"\"\"The function should raise an HTTPError", "= 4321 types = [\"SSL\"] api_url = f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup", "function should raise an HTTPError exception if the rejection failed.\"\"\"", "domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the mocked", "description=\"Example sub domain\") self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def", "Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"other\"]", "def test_approve_delegation_success(self): \"\"\"The function should return True if the approval", "query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) class TestGet(TestDomain): \"\"\"Test the", "data = domain.get(domain_id) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_individual_response) @responses.activate", "ID, as well as add all parameters to the request", "def test_create_failure_missing_location_header(self): \"\"\" The function should return an error code", "json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_success_optional_params(self): \"\"\" The function should return the", "self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, 
json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_remove_delegation_failure_http_error(self): \"\"\"The function should", "a url\"}, status=201) domain = Domain(client=self.client) create_args = { \"name\":", "Domain(client=self.client) self.assertRaises(HTTPError, domain.count) # Verify all the query information self.assertEqual(len(responses.calls),", "rejection failed.\"\"\" domain_id = 1234 org_id = 4321 api_url =", "return data about the specified Domain ID.\"\"\" domain_id = 1234", "testing # pylint: disable=protected-access # pylint: disable=no-member import json from", "test_create_failure_http_status_unexpected(self): \"\"\" The function should return an error code and", "as a parameter.\"\"\" # Set a new version version =", "@responses.activate def test_params(self): \"\"\"Parameters will be passed to API\"\"\" #", "f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the mocked response responses.add(responses.PUT, api_url, status=200) domain", "query information self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response[0]) @responses.activate def test_bad_http(self): \"\"\"The", "\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the", "self.assertRaises(HTTPError, domain.delete, domain_id) class TestActivate(TestDomain): \"\"\"Test the .activate method.\"\"\" @responses.activate", "succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the", "function should return True if the approval succeeded.\"\"\" domain_id =", "domain.all() data = domain.all(force=True) # Verify all the query information", "org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_reject_failure_http_error(self): \"\"\"The", "domain_id self.assertRaises(TypeError, domain.delete) @responses.activate def test_delete_success(self): \"\"\"The function 
should return", "1234 org_id = 4321 types = [\"SSL\"] api_url = f\"{self.api_url}/{str(domain_id)}/delegation\"", "= f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\": location}, status=201) domain = Domain(client=self.client)", "function should return True if the suspension succeeded.\"\"\" domain_id =", "[\"SSL\"] api_url = f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.POST,", "version version = \"v3\" api_url = f\"{self.cfixt.base_url}/domain/{version}\" # Setup the", "the API twice.\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url,", "# Setup the mocked response responses.add(responses.GET, api_url, status=404) domain =", "status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types) class", "function should raise an HTTPError exception if the specified Domain", "TestCount(TestDomain): \"\"\"Test the .count method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without parameters,", "f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.POST, api_url, status=200) domain", "self.cfixt.client self.api_url = f\"{self.cfixt.base_url}/domain/v1\" # Setup a test response one", "@responses.activate def test_delete_failure_http_error(self): \"\"\" The function should raise an HTTPError", "should only be one call the first time \"all\" is", "domain.all(force=True) # Verify all the query information # There should", "# Setup the mocked response count = {\"count\": len(self.valid_response)} api_url", "= self.useFixture(ClientFixture()) self.client = self.cfixt.client self.api_url = f\"{self.cfixt.base_url}/domain/v1\" # Setup", "} self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_missing_location_header(self): \"\"\" The function", "about things 
that happen as that is part of unit", "\"sub2.example.com\", \"delegations\": [{\"orgId\": 4321, \"certTypes\": [\"SSL\"]}], \"description\": \"Example sub domain\"", "but verify that something is required self.assertRaises(TypeError, domain.create) @responses.activate def", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types) class TestRemoveDelegation(TestDomain):", "status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.suspend, domain_id) class TestDelegate(TestDomain): \"\"\"Test", "= { \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": org_id, \"certTypes\": types}] }", "parameters. \"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.reject_delegation)", "function should return all the data, but should not query", "a new version version = \"v3\" api_url = f\"{self.cfixt.base_url}/domain/{version}\" #", "= domain.all(force=True) # Verify all the query information # There", "# Setup the mocked response domain_id = 1234 org_id =", "exception if the rejection failed.\"\"\" domain_id = 1234 org_id =", "domain_id = 1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/approve\" #", "responses.add(responses.PUT, api_url, status=200) domain = Domain(client=self.client) response = domain.suspend(domain_id) self.assertEqual(True,", "is part of unit testing # pylint: disable=protected-access # pylint:", "method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without parameters, the method will return", "mocked response responses.add(responses.GET, api_url, json=self.valid_response, status=200) domain = Domain(client=self.client, api_version=version)", "responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.activate, domain_id) class", "self.assertRaises(TypeError, domain.create) @responses.activate def test_create_success(self): 
\"\"\" The function should return", "self.api_url, headers={\"Location\": \"not a url\"}, status=201) domain = Domain(client=self.client) create_args", "parameter.\"\"\" self.assertRaises(TypeError, Domain) class TestAll(TestDomain): \"\"\"Test the .all method.\"\"\" @responses.activate", ".reject_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should raise", "\"\"\"The class should raise an exception without a client parameter.\"\"\"", "TestAll(TestDomain): \"\"\"Test the .all method.\"\"\" @responses.activate def test_cached(self): \"\"\"The function", "= Domain(client=self.client) data = domain.all() data = domain.all() # Verify", "the mocked response count = {\"count\": len(self.valid_response)} api_url = f\"{self.api_url}/count\"", "= { \"orgId\": org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate", "1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestFind(TestDomain): \"\"\"Test the .find method.\"\"\" @responses.activate", "= Domain(client=self.client) data = domain.count(name=\"example.com\") # Verify all the query", "domain.delete(domain_id) self.assertEqual(True, response) @responses.activate def test_delete_failure_http_error(self): \"\"\" The function should", "\"\"\"Test the .create method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function", "mocked response responses.add(responses.DELETE, api_url, status=200) domain = Domain(client=self.client) response =", "Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) class", "status code). 
\"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url,", "data = domain.count() self.assertEqual(data, count) self.assertEqual(responses.calls[0].request.url, api_url) @responses.activate def test_params(self):", "and setup self.cfixt = self.useFixture(ClientFixture()) self.client = self.cfixt.client self.api_url =", "cert_manager.domain import Domain, DomainCreationResponseError from .lib.testbase import ClientFixture class TestDomain(TestCase):", "API.\"\"\" # Setup the mocked response api_url = f\"{self.api_url}/count\" responses.add(responses.GET,", "# Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url)", "self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types) class TestRemoveDelegation(TestDomain): \"\"\"Test the .remove_delegation", "Setup the mocked response domain_id = 1234 org_id = 4321", "if the approval failed.\"\"\" domain_id = 1234 org_id = 4321", "test response for getting a specific Domain self.valid_individual_response = self.valid_response[0]", "\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delete) @responses.activate", "Domain(client=self.client) data = domain.count() self.assertEqual(data, count) self.assertEqual(responses.calls[0].request.url, api_url) @responses.activate def", "self.valid_individual_response[\"status\"] = \"Active\" # Setup JSON to return in an", "\"\"\"Parameters will be passed to API\"\"\" # Setup the mocked", "@responses.activate def test_need_domain_id(self): \"\"\"The function should raise an exception without", "def test_create_failure_http_status_unexpected(self): \"\"\" The function should return an error code", "self.assertEqual(data, count) @responses.activate def test_bad_http(self): \"\"\"The function should raise an", "domain_id, org_id, types) class TestApproveDelegation(TestDomain): \"\"\"Test the .approve_delegation method.\"\"\" 
@responses.activate", "\"\"\"Test the .find method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without parameters, the", "= 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the mocked response", "def test_approval_failure_http_error(self): \"\"\"The function should raise an HTTPError exception if", "TestCase import responses from cert_manager.domain import Domain, DomainCreationResponseError from .lib.testbase", "as a Base class for all tests of the Domain", "len(self.valid_response[0])} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count, status=200) domain =", "{\"description\": \"domain error\"} class TestInit(TestDomain): \"\"\"Test the class initializer.\"\"\" @responses.activate", "should return the created domain ID when additional params are", "HTTPError exception if the approval failed.\"\"\" domain_id = 1234 org_id", "data about the specified Domain ID.\"\"\" domain_id = 1234 api_url", "responses.add(responses.GET, api_url, json=count, status=200) domain = Domain(client=self.client) data = domain.count()", "ID when additional params are specified, as well add the", "} response = domain.create(\"sub2.example.com\", 4321, [\"SSL\"], description=\"Example sub domain\") self.assertEqual(response,", "created and setup self.cfixt = self.useFixture(ClientFixture()) self.client = self.cfixt.client self.api_url", "the page size self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(responses.calls[1].request.url, self.api_url) self.assertEqual(data,", "{\"count\": len(self.valid_response[0])} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count, status=200) domain", "raise an HTTPError exception if domains cannot be retrieved from", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.get, domain_id) class TestCreate(TestDomain): \"\"\"Test the", "2345 api_url = 
f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.GET,", "\"\"\"Test the .all method.\"\"\" @responses.activate def test_cached(self): \"\"\"The function should", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.get, domain_id) class TestCreate(TestDomain): \"\"\"Test the .create", "domain\") self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_failure_http_error(self): \"\"\"", "test_bad_http(self): \"\"\"The function should raise an HTTPError exception if counts", "should raise an exception without a client parameter.\"\"\" self.assertRaises(TypeError, Domain)", "api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types)", "Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.reject_delegation) @responses.activate def test_reject_delegation_success(self): \"\"\"The", "status=200) domain = Domain(client=self.client) data = domain.count(name=\"example.com\") # Verify all", "self.assertEqual(responses.calls[0].request.url, self.api_url) class TestFind(TestDomain): \"\"\"Test the .find method.\"\"\" @responses.activate def", "domain_id parameter.\"\"\" domain = Domain(client=self.client) self.assertRaises(TypeError, domain.get) @responses.activate def test_domain_id(self):", "if counts cannot be retrieved from the API.\"\"\" # Setup", "Domain) class TestAll(TestDomain): \"\"\"Test the .all method.\"\"\" @responses.activate def test_cached(self):", "responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.suspend, domain_id) class", "setUp(self): # pylint: disable=invalid-name \"\"\"Initialize the class.\"\"\" # Call the", "domain.create, **create_args) @responses.activate def 
test_create_failure_missing_location_header(self): \"\"\" The function should return", "response one would expect normally self.valid_response = [ {\"id\": 1234,", "class TestActivate(TestDomain): \"\"\"Test the .activate method.\"\"\" @responses.activate def test_need_params(self): \"\"\"", "response) @responses.activate def test_delete_failure_http_error(self): \"\"\" The function should raise an", "api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.DELETE, api_url,", "# Due to pagination, this is only guaranteed as long", "the class initializer.\"\"\" @responses.activate def test_param(self): \"\"\"The URL should change", "the mocked response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError,", "mocked response domain_id = 1234 org_id = 4321 types =", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id) class TestRejectDelegation(TestDomain): \"\"\"Test the", "domain.approve_delegation) @responses.activate def test_approve_delegation_success(self): \"\"\"The function should return True if", "self.useFixture(ClientFixture()) self.client = self.cfixt.client self.api_url = f\"{self.cfixt.base_url}/domain/v1\" # Setup a", "about the specified Domain ID.\"\"\" domain_id = 1234 api_url =", "\"orgId\": org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_approval_failure_http_error(self):", "= domain.all() data = domain.all(force=True) # Verify all the query", "= Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.reject_delegation) @responses.activate def test_reject_delegation_success(self):", "should raise an HTTPError exception if the rejection failed.\"\"\" domain_id", "not query the API twice.\"\"\" # Setup the mocked response", "response). 
\"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url, headers={\"Location\":", "response = domain.reject_delegation(domain_id, org_id) post_data = { \"orgId\": org_id, }", "DomainCreationResponseError (no Location header in response). \"\"\" # Setup the", "= domain.delete(domain_id) self.assertEqual(True, response) @responses.activate def test_delete_failure_http_error(self): \"\"\" The function", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types) class TestApproveDelegation(TestDomain): \"\"\"Test", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.suspend, domain_id) class TestDelegate(TestDomain): \"\"\"Test the", "to the request body \"\"\" # Setup the mocked response", "the deletion succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" #", "response = domain.delegate(domain_id, org_id, types) post_data = { \"orgId\": org_id,", "api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count, status=200) domain = Domain(client=self.client)", "if the rejection failed.\"\"\" domain_id = 1234 org_id = 4321", "time \"all\" is called. 
# Due to pagination, this is", "Domain(client=self.client) post_data = { \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": 4321, \"certTypes\":", "Domain(client=self.client) self.assertRaises(HTTPError, domain.activate, domain_id) class TestSuspend(TestDomain): \"\"\"Test the .suspend method.\"\"\"", "a client parameter.\"\"\" self.assertRaises(TypeError, Domain) class TestAll(TestDomain): \"\"\"Test the .all", "data, but should not query the API twice.\"\"\" # Setup", "specified, as well add the non-required parameters to the request", "Setup the mocked response count = {\"count\": len(self.valid_response[0])} api_url =", "\"\"\" # Setup the mocked response domain_id = 1234 org_id", "= f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError,", "count) @responses.activate def test_bad_http(self): \"\"\"The function should raise an HTTPError", "Setup the mocked response responses.add(responses.POST, self.api_url, headers={\"Location\": \"not a url\"},", "self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) class TestGet(TestDomain): \"\"\"Test the .get method.\"\"\"", ".activate method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should raise", "api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the mocked response responses.add(responses.PUT, api_url,", "self.assertRaises(HTTPError, domain.all) # Verify all the query information self.assertEqual(len(responses.calls), 1)", "self.valid_response) @responses.activate def test_forced(self): \"\"\"The function should return all the", "raise an HTTPError exception if the suspension failed. \"\"\" domain_id", "domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked", "Client fixture is created and setup self.cfixt = self.useFixture(ClientFixture()) self.client", "required parameters. 
\"\"\" domain = Domain(client=self.client) # Not going to", "Domain(client=self.client) self.assertRaises(HTTPError, domain.all) # Verify all the query information self.assertEqual(len(responses.calls),", "def test_reject_delegation_success(self): \"\"\"The function should return True if the rejection", "True if the deletion succeeded.\"\"\" domain_id = 1234 api_url =", "function should raise an HTTPError exception if the delegation removal", "\"\"\"Test the .approve_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function", "deletion failed. \"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\" #", "and description if the Domain creation failed with DomainCreationResponseError (unexpected", "exist.\"\"\" domain_id = 2345 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the", "f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.GET, api_url, status=404) domain", "1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the mocked response responses.add(responses.PUT,", "function should return True if the delegation removal succeeded.\"\"\" domain_id", "entries returned is less than the page size self.assertEqual(len(responses.calls), 2)", "sub domain\" } response = domain.create(\"sub2.example.com\", 4321, [\"SSL\"], description=\"Example sub", "response responses.add(responses.GET, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.get, domain_id)", "an exception when called without required parameters. 
\"\"\" domain =", "= Domain(client=self.client) response = domain.activate(domain_id) self.assertEqual(True, response) @responses.activate def test_activate_failure_http_error(self):", "Domain(client=self.client) self.assertRaises(HTTPError, domain.delete, domain_id) class TestActivate(TestDomain): \"\"\"Test the .activate method.\"\"\"", "self.assertRaises(TypeError, domain.delegate) @responses.activate def test_delegate_success(self): \"\"\"The function should return True", "when called without required parameters. \"\"\" domain = Domain(client=self.client) #", "= {\"count\": len(self.valid_response)} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count, status=200)", "should return True if the approval succeeded.\"\"\" domain_id = 1234", "def test_activate_failure_http_error(self): \"\"\" The function should raise an HTTPError exception", "domain = Domain(client=self.client) data = domain.get(domain_id) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url)", "response responses.add(responses.DELETE, api_url, status=200) domain = Domain(client=self.client) response = domain.remove_delegation(domain_id,", "1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_individual_response) @responses.activate def test_ne_domain_id(self): \"\"\"The function", "self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id) class TestRejectDelegation(TestDomain): \"\"\"Test the .reject_delegation method.\"\"\"", "] # Setup a test response for getting a specific", "org_id, types) class TestApproveDelegation(TestDomain): \"\"\"Test the .approve_delegation method.\"\"\" @responses.activate def", "status=200) domain = Domain(client=self.client) response = domain.activate(domain_id) self.assertEqual(True, response) @responses.activate", "Setup the mocked response responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client)", 
"Setup the mocked response count = {\"count\": len(self.valid_response)} api_url =", "= Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\":", "responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id,", "the Domain creation failed. \"\"\" # Setup the mocked response", "\"\"\"The function should raise an HTTPError exception if the approval", "class for all tests of the Domain class.\"\"\" def setUp(self):", "responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.reject_delegation, domain_id, org_id)", "domain = Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\", \"org_id\": 4321,", "missing domain_id self.assertRaises(TypeError, domain.activate) @responses.activate def test_activate_success(self): \"\"\"The function should", "status=200) domain = Domain(client=self.client) data = domain.all() data = domain.all(force=True)", "self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types) class TestApproveDelegation(TestDomain): \"\"\"Test the .approve_delegation", "org_id) post_data = { \"orgId\": org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body,", "= Domain(client=self.client) response = domain.suspend(domain_id) self.assertEqual(True, response) @responses.activate def test_suspend_failure_http_error(self):", "return the created domain ID, as well as add all", "class TestInit(TestDomain): \"\"\"Test the class initializer.\"\"\" @responses.activate def test_param(self): \"\"\"The", "response responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.suspend, domain_id)", "is called. 
# Due to pagination, this is only guaranteed", "response = domain.suspend(domain_id) self.assertEqual(True, response) @responses.activate def test_suspend_failure_http_error(self): \"\"\" The", "self.assertEqual(responses.calls[0].request.url, api_url) class TestGet(TestDomain): \"\"\"Test the .get method.\"\"\" @responses.activate def", "\"delegations\": [{\"orgId\": org_id, \"certTypes\": types}] } response = domain.create(\"sub2.example.com\", org_id,", "well add the non-required parameters to the request body \"\"\"", "if the activation succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\"", "-*- coding: utf-8 -*- \"\"\"Define the cert_manager.domain.Domain unit tests.\"\"\" #", "passed to API\"\"\" # Setup the mocked response count =", "The function should return an error code and description if", "the Domain creation failed with DomainCreationResponseError (Domain ID not found", "\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delegate) @responses.activate", "= f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the mocked response responses.add(responses.PUT, api_url, status=404)", "(no Location header in response). 
\"\"\" # Setup the mocked", "only be one call the first time \"all\" is called.", "as well as add all parameters to the request body", "Due to pagination, this is only guaranteed as long as", "test_need_domain_id(self): \"\"\"The function should raise an exception without an domain_id", "Domain(client=self.client) data = domain.all() data = domain.all() # Verify all", "status=200) domain = Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\", \"org_id\":", "= f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count, status=200) domain = Domain(client=self.client) data", "TestSuspend(TestDomain): \"\"\"Test the .suspend method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The", "if the deletion succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\"", "should return True if the delegation succeeded.\"\"\" domain_id = 1234", "= {\"description\": \"domain error\"} class TestInit(TestDomain): \"\"\"Test the class initializer.\"\"\"", "responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client) data = domain.all()", "= { \"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"other\"] } self.assertRaises(ValueError,", "function should raise an HTTPError exception if the suspension failed.", "= Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.approve_delegation) @responses.activate def test_approve_delegation_success(self):", "# Make sure the Client fixture is created and setup", "response = domain.create(\"sub2.example.com\", org_id, types) self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\"))", "the .delegate method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should", "than the page size self.assertEqual(len(responses.calls), 1) 
self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(data, self.valid_response)", "mocked response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delegate,", "= f\"{self.cfixt.base_url}/domain/{version}\" # Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_response,", "response) @responses.activate def test_suspend_failure_http_error(self): \"\"\" The function should raise an", "as add all parameters to the request body \"\"\" #", "Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain =", "api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.activate, domain_id) class TestSuspend(TestDomain):", "self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_individual_response) @responses.activate def test_ne_domain_id(self): \"\"\"The function should", "test_forced(self): \"\"\"The function should return all the data, but should", "method will count all domains\"\"\" # Setup the mocked response", "raise an exception without an domain_id parameter.\"\"\" domain = Domain(client=self.client)", "= f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the mocked response responses.add(responses.PUT, api_url, status=404)", "self.assertEqual(responses.calls[0].request.url, f\"{api_url}?name=example.com\") self.assertEqual(data, count) @responses.activate def test_bad_http(self): \"\"\"The function should", "self.assertRaises(HTTPError, domain.suspend, domain_id) class TestDelegate(TestDomain): \"\"\"Test the .delegate method.\"\"\" @responses.activate", "a parameter.\"\"\" # Set a new version version = \"v3\"", "len(self.valid_response)} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count, status=200) domain =", "the mocked response responses.add(responses.POST, self.api_url, 
headers={\"Location\": \"not a url\"}, status=201)", "self.assertEqual(data, count) self.assertEqual(responses.calls[0].request.url, api_url) @responses.activate def test_params(self): \"\"\"Parameters will be", "pylint: disable=no-member import json from requests.exceptions import HTTPError from testtools", "the API.\"\"\" # Setup the mocked response api_url = f\"{self.api_url}/count\"", "succeeded.\"\"\" domain_id = 1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/approve\"", "The function should raise an exception when called without required", "coding: utf-8 -*- \"\"\"Define the cert_manager.domain.Domain unit tests.\"\"\" # Don't", "= f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.DELETE, api_url, status=404)", "\"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=200)", "domain_id self.assertRaises(TypeError, domain.activate) @responses.activate def test_activate_success(self): \"\"\"The function should return", "{\"id\": 4322, \"name\": \"subdomain.example.com\"}, ] # Setup a test response", "domain_id self.assertRaises(TypeError, domain.suspend) @responses.activate def test_suspend_success(self): \"\"\"The function should return", "f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup the mocked response responses.add(responses.POST, api_url, status=200) domain", "exception if the suspension failed. \"\"\" domain_id = 1234 api_url", "error code and description if the Domain creation failed. \"\"\"", "call the first time \"all\" is called. 
# Due to", "The function should return the created domain ID, as well", "self.valid_response) @responses.activate def test_bad_http(self): \"\"\"The function should raise an HTTPError", "json=self.valid_individual_response, status=200) domain = Domain(client=self.client) data = domain.get(domain_id) self.assertEqual(len(responses.calls), 1)", "class TestApproveDelegation(TestDomain): \"\"\"Test the .approve_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\"", "= Domain(client=self.client) self.assertRaises(TypeError, domain.get) @responses.activate def test_domain_id(self): \"\"\"The function should", "@responses.activate def test_no_params(self): \"\"\"Without parameters, the method will return all", "domain_id) class TestDelegate(TestDomain): \"\"\"Test the .delegate method.\"\"\" @responses.activate def test_need_params(self):", "DomainCreationResponseError from .lib.testbase import ClientFixture class TestDomain(TestCase): # pylint: disable=too-few-public-methods", "called without required parameters. 
\"\"\" domain = Domain(client=self.client) # Not", "1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup the", "function should return True if the rejection succeeded.\"\"\" domain_id =", "f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup the mocked response responses.add(responses.POST, api_url, status=200) domain", "self.api_url) class TestFind(TestDomain): \"\"\"Test the .find method.\"\"\" @responses.activate def test_no_params(self):", "responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response = domain.reject_delegation(domain_id, org_id)", "Setup the mocked response responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client)", "domain.create, **create_args) class TestDelete(TestDomain): \"\"\"Test the .delete method.\"\"\" @responses.activate def", "\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.suspend) @responses.activate", "True if the rejection succeeded.\"\"\" domain_id = 1234 org_id =", "\"all\" is called. 
# Due to pagination, this is only", "types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_delegate_failure_http_error(self): \"\"\"The", "unit testing # pylint: disable=protected-access # pylint: disable=no-member import json", "= domain.get(domain_id) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_individual_response) @responses.activate def", "self.api_url, headers={\"Location\": location}, status=201) domain = Domain(client=self.client) post_data = {", "@responses.activate def test_create_success(self): \"\"\" The function should return the created", "the .count method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without parameters, the method", "json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.find) # Verify all", "Don't warn about things that happen as that is part", "responses.add(responses.GET, api_url, json=self.valid_response, status=200) domain = Domain(client=self.client, api_version=version) data =", "response responses.add(responses.PUT, api_url, status=200) domain = Domain(client=self.client) response = domain.suspend(domain_id)", "domain_id self.assertRaises(TypeError, domain.reject_delegation) @responses.activate def test_reject_delegation_success(self): \"\"\"The function should return", "status=200) domain = Domain(client=self.client) response = domain.delete(domain_id) self.assertEqual(True, response) @responses.activate", "mocked response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.approve_delegation,", "\"subdomain.example.com\"}, ] # Setup a test response for getting a", "test_delete_failure_http_error(self): \"\"\" The function should raise an HTTPError exception if", "@responses.activate def 
test_remove_delegation_failure_http_error(self): \"\"\"The function should raise an HTTPError exception", "the .approve_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should", "class should raise an exception without a client parameter.\"\"\" self.assertRaises(TypeError,", "return True if the deletion succeeded.\"\"\" domain_id = 1234 api_url", "the class.\"\"\" # Call the inherited setUp method super().setUp() #", "org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup the mocked", "the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=200) domain = Domain(client=self.client)", "\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.approve_delegation) @responses.activate", "# Setup the mocked response responses.add(responses.POST, api_url, status=200) domain =", "approval succeeded.\"\"\" domain_id = 1234 org_id = 4321 api_url =", "in an error self.error_response = {\"description\": \"domain error\"} class TestInit(TestDomain):", "body \"\"\" # Setup the mocked response domain_id = 1234", "mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=200) domain = Domain(client=self.client) create_args", "= f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup the mocked response responses.add(responses.POST, api_url, status=404)", "does not exist.\"\"\" domain_id = 2345 api_url = f\"{self.api_url}/{str(domain_id)}\" #", "# missing domain_id self.assertRaises(TypeError, domain.activate) @responses.activate def test_activate_success(self): \"\"\"The function", "an error self.error_response = {\"description\": \"domain error\"} class TestInit(TestDomain): \"\"\"Test", "parameters. 
\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.suspend)", "as well add the non-required parameters to the request body", "TestDelete(TestDomain): \"\"\"Test the .delete method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The", "test_need_params(self): \"\"\" The function should raise an exception when called", "domain = Domain(client=self.client) self.assertRaises(TypeError, domain.get) @responses.activate def test_domain_id(self): \"\"\"The function", "domain.suspend(domain_id) self.assertEqual(True, response) @responses.activate def test_suspend_failure_http_error(self): \"\"\" The function should", "parameters. \"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.activate)", "location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\": location}, status=201) domain =", "\"\"\" domain = Domain(client=self.client) # Not going to check every", "test_no_params(self): \"\"\"Without parameters, the method will count all domains\"\"\" #", "response responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200) domain = Domain(client=self.client) data =", "permutation of missing parameters, # but verify that something is", "disable=protected-access # pylint: disable=no-member import json from requests.exceptions import HTTPError", "= Domain(client=self.client) response = domain.delete(domain_id) self.assertEqual(True, response) @responses.activate def test_delete_failure_http_error(self):", "domains cannot be retrieved from the API.\"\"\" # Setup the", "domain_id self.assertRaises(TypeError, domain.approve_delegation) @responses.activate def test_approve_delegation_success(self): \"\"\"The function should return", "information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response) def 
test_need_client(self): \"\"\"The", "return True if the delegation removal succeeded.\"\"\" domain_id = 1234", "the mocked response domain_id = 1234 location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST,", "Domain(client=self.client) self.assertRaises(HTTPError, domain.suspend, domain_id) class TestDelegate(TestDomain): \"\"\"Test the .delegate method.\"\"\"", "domain_id = 1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/reject\" #", "missing domain_id self.assertRaises(TypeError, domain.approve_delegation) @responses.activate def test_approve_delegation_success(self): \"\"\"The function should", "data = domain.find() self.assertEqual(data, self.valid_response) @responses.activate def test_params(self): \"\"\"Parameters will", "self.assertEqual(True, response) @responses.activate def test_activate_failure_http_error(self): \"\"\" The function should raise", "self.assertEqual(data, self.valid_response) @responses.activate def test_forced(self): \"\"\"The function should return all", "method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without parameters, the method will count", "going to check every permutation of missing parameters, # but", "TestRejectDelegation(TestDomain): \"\"\"Test the .reject_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The", ".delegate method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should raise", "query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestFind(TestDomain): \"\"\"Test the", "delegation removal succeeded.\"\"\" domain_id = 1234 org_id = 4321 types", "count) self.assertEqual(responses.calls[0].request.url, api_url) @responses.activate def test_params(self): \"\"\"Parameters will be passed", "= f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the mocked response responses.add(responses.PUT, api_url, status=200)", 
"api_url) class TestGet(TestDomain): \"\"\"Test the .get method.\"\"\" @responses.activate def test_need_domain_id(self):", "failed.\"\"\" domain_id = 1234 org_id = 4321 types = [\"SSL\"]", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.find) # Verify all the query", "information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) class TestGet(TestDomain): \"\"\"Test the .get", "domain_id = 1234 org_id = 4321 types = [\"SSL\"] location", "the Client fixture is created and setup self.cfixt = self.useFixture(ClientFixture())", "domain\" } response = domain.create(\"sub2.example.com\", 4321, [\"SSL\"], description=\"Example sub domain\")", "if the suspension succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\"", "Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.suspend) @responses.activate def test_suspend_success(self): \"\"\"The", "import responses from cert_manager.domain import Domain, DomainCreationResponseError from .lib.testbase import", "mocked response responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200) domain = Domain(client=self.client) data", "def test_ne_domain_id(self): \"\"\"The function should raise an HTTPError exception if", "cannot be retrieved from the API.\"\"\" # Setup the mocked", "@responses.activate def test_suspend_failure_http_error(self): \"\"\" The function should raise an HTTPError", "url\"}, status=201) domain = Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\",", "return True if the approval succeeded.\"\"\" domain_id = 1234 org_id", "creation failed with DomainCreationResponseError (Domain ID not found in response).", "@responses.activate def test_delegate_success(self): \"\"\"The function should return True if the", "types) self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, 
json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_success_optional_params(self): \"\"\"", "# Verify all the query information self.assertEqual(responses.calls[0].request.url, f\"{api_url}?name=example.com\") self.assertEqual(data, count)", "the delegation failed.\"\"\" domain_id = 1234 org_id = 4321 types", "# Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200) api_url", "mocked response responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delete,", "all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) class TestGet(TestDomain):", "if the Domain creation failed. \"\"\" # Setup the mocked", "= Domain(client=self.client) data = domain.find(name=\"example.com\") # Verify all the query", "def test_delegate_failure_http_error(self): \"\"\"The function should raise an HTTPError exception if", "\"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url, status=201) domain", "def test_reject_failure_http_error(self): \"\"\"The function should raise an HTTPError exception if", "api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.get, domain_id) class TestCreate(TestDomain):", "a test response one would expect normally self.valid_response = [", "raise an HTTPError exception if the delegation removal failed.\"\"\" domain_id", "@responses.activate def test_activate_success(self): \"\"\"The function should return True if the", "self.assertRaises(TypeError, Domain) class TestAll(TestDomain): \"\"\"Test the .all method.\"\"\" @responses.activate def", "test_need_client(self): \"\"\"The class should raise an exception without a client", "self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_missing_location_header(self): \"\"\" The 
function should", "# Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=200) domain", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.all) # Verify all the query information", "f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.DELETE, api_url, status=200) domain", "= f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_individual_response,", "} self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_domain_id_not_found(self): \"\"\" The function", "all the data, but should query the API twice.\"\"\" #", "class TestDelegate(TestDomain): \"\"\"Test the .delegate method.\"\"\" @responses.activate def test_need_params(self): \"\"\"", "response responses.add(responses.POST, self.api_url, json=self.error_response, status=200) domain = Domain(client=self.client) create_args =", "test response one would expect normally self.valid_response = [ {\"id\":", "all the query information self.assertEqual(responses.calls[0].request.url, f\"{api_url}?name=example.com\") self.assertEqual(data, count) @responses.activate def", "\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the", "[{\"orgId\": 4321, \"certTypes\": [\"SSL\"]}], \"description\": \"Example sub domain\" } response", "non-required parameters to the request body \"\"\" # Setup the", "\"\"\"The function should return data about the specified Domain ID.\"\"\"", "of missing parameters, # but verify that something is required", "self.valid_individual_response) @responses.activate def test_ne_domain_id(self): \"\"\"The function should raise an HTTPError", "def test_activate_success(self): \"\"\"The function should return True if the activation", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.count) # Verify all the query information", 
"query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response) def test_need_client(self):", "should raise an HTTPError exception if the suspension failed. \"\"\"", "4321, \"name\": \"*.example.com\"}, {\"id\": 4322, \"name\": \"subdomain.example.com\"}, ] # Setup", "data = domain.all() data = domain.all(force=True) # Verify all the", "[\"SSL\"] location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\": location}, status=201) domain", "Domain creation failed with DomainCreationResponseError (no Location header in response).", "f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.DELETE, api_url, status=404) domain", "should return an error code and description if the Domain", "ID does not exist.\"\"\" domain_id = 2345 api_url = f\"{self.api_url}/{str(domain_id)}\"", "return True if the activation succeeded.\"\"\" domain_id = 1234 api_url", "1234 org_id = 4321 types = [\"SSL\"] location = f\"{self.api_url}/{str(domain_id)}\"", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.activate, domain_id) class TestSuspend(TestDomain): \"\"\"Test the", "org_id = 4321 types = [\"SSL\"] api_url = f\"{self.api_url}/{str(domain_id)}/delegation\" #", "return all the data, but should not query the API", "API.\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.error_response, status=400)", "status=200) domain = Domain(client=self.client) response = domain.approve_delegation(domain_id, org_id) post_data =", "= self.cfixt.client self.api_url = f\"{self.cfixt.base_url}/domain/v1\" # Setup a test response", "parameters to the request body \"\"\" # Setup the mocked", "creation failed with DomainCreationResponseError (no Location header in response). 
\"\"\"", "\"\"\"The function should raise an HTTPError exception if the specified", "\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.remove_delegation) @responses.activate", "f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.DELETE, api_url, status=404) domain", "self.assertRaises(HTTPError, domain.count) # Verify all the query information self.assertEqual(len(responses.calls), 1)", "def test_domain_id(self): \"\"\"The function should return data about the specified", "mocked response responses.add(responses.POST, self.api_url, status=201) domain = Domain(client=self.client) create_args =", "parameters, the method will return all domains\"\"\" # Setup the", "description if the Domain creation failed. \"\"\" # Setup the", "responses.add(responses.POST, self.api_url, status=201) domain = Domain(client=self.client) create_args = { \"name\":", "Domain(client=self.client) data = domain.find(name=\"example.com\") # Verify all the query information", "all domains\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response,", "Setup the mocked response api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=self.error_response,", "\"certTypes\": [\"SSL\"]}], \"description\": \"Example sub domain\" } response = domain.create(\"sub2.example.com\",", "raise an HTTPError exception if the rejection failed.\"\"\" domain_id =", "code). 
\"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response,", "self.assertRaises(ValueError, domain.create, **create_args) @responses.activate def test_create_failure_http_status_unexpected(self): \"\"\" The function should", "True if the suspension succeeded.\"\"\" domain_id = 1234 api_url =", "this is only guaranteed as long as the number of", "size self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(responses.calls[1].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate", "Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200) api_url =", "query the API twice.\"\"\" # Setup the mocked response responses.add(responses.GET,", "self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_delegate_failure_http_error(self): \"\"\"The function should", "status=201) domain = Domain(client=self.client) post_data = { \"name\": \"sub2.example.com\", \"delegations\":", "f\"{api_url}?name=example.com\") self.assertEqual(data, count) @responses.activate def test_bad_http(self): \"\"\"The function should raise", "domain.create(\"sub2.example.com\", 4321, [\"SSL\"], description=\"Example sub domain\") self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body,", "Domain(client=self.client) post_data = { \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": org_id, \"certTypes\":", "self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_individual_response) @responses.activate def test_ne_domain_id(self): \"\"\"The", "4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def 
test_create_failure_domain_id_not_found(self):", "\"\"\"The function should raise an HTTPError exception if the rejection", "self.assertEqual(True, response) @responses.activate def test_delete_failure_http_error(self): \"\"\" The function should raise", "succeeded.\"\"\" domain_id = 1234 org_id = 4321 types = [\"SSL\"]", "@responses.activate def test_create_failure_http_status_unexpected(self): \"\"\" The function should return an error", "the data, but should query the API twice.\"\"\" # Setup", "mocked response responses.add(responses.PUT, api_url, status=200) domain = Domain(client=self.client) response =", "status=200) domain = Domain(client=self.client) data = domain.find() self.assertEqual(data, self.valid_response) @responses.activate", "domain = Domain(client=self.client) data = domain.all() data = domain.all() #", "data = domain.count(name=\"example.com\") # Verify all the query information self.assertEqual(responses.calls[0].request.url,", "function should raise an HTTPError exception if the approval failed.\"\"\"", "ClientFixture class TestDomain(TestCase): # pylint: disable=too-few-public-methods \"\"\"Serve as a Base", "Domain(client=self.client) data = domain.count(name=\"example.com\") # Verify all the query information", "an HTTPError exception if counts cannot be retrieved from the", "domain.remove_delegation) @responses.activate def test_remove_delegation_success(self): \"\"\"The function should return True if", "self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_success_optional_params(self): \"\"\" The function should return", "response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.reject_delegation, domain_id,", "will be passed to API\"\"\" # Setup the mocked response", "api_url) self.assertEqual(data, self.valid_response[0]) @responses.activate def test_bad_http(self): \"\"\"The function 
should raise", "domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.remove_delegation) @responses.activate def", "# Setup the mocked response responses.add(responses.DELETE, api_url, status=200) domain =", "Base class for all tests of the Domain class.\"\"\" def", "specified Domain ID does not exist.\"\"\" domain_id = 2345 api_url", "new version version = \"v3\" api_url = f\"{self.cfixt.base_url}/domain/{version}\" # Setup", "def test_forced(self): \"\"\"The function should return all the data, but", "test_delegate_success(self): \"\"\"The function should return True if the delegation succeeded.\"\"\"", "def test_remove_delegation_success(self): \"\"\"The function should return True if the delegation", "# missing domain_id self.assertRaises(TypeError, domain.reject_delegation) @responses.activate def test_reject_delegation_success(self): \"\"\"The function", "to check every permutation of missing parameters, # but verify", "succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the", "data, but should query the API twice.\"\"\" # Setup the", "be passed to API\"\"\" # Setup the mocked response count", "Setup the mocked response responses.add(responses.GET, api_url, status=404) domain = Domain(client=self.client)", "required self.assertRaises(TypeError, domain.create) @responses.activate def test_create_success(self): \"\"\" The function should", "class TestDomain(TestCase): # pylint: disable=too-few-public-methods \"\"\"Serve as a Base class", "if the Domain creation failed with DomainCreationResponseError (unexpected HTTP status", "self.assertEqual(data, self.valid_response) @responses.activate def test_params(self): \"\"\"Parameters will be passed to", "function should return True if the deletion succeeded.\"\"\" domain_id =", "response responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response = domain.delegate(domain_id,", "= 
Domain(client=self.client) post_data = { \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": org_id,", "status=200) domain = Domain(client=self.client) response = domain.suspend(domain_id) self.assertEqual(True, response) @responses.activate", "api_url = f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the mocked response responses.add(responses.PUT, api_url,", "exception if counts cannot be retrieved from the API.\"\"\" #", "removal succeeded.\"\"\" domain_id = 1234 org_id = 4321 types =", "raise an HTTPError exception if the approval failed.\"\"\" domain_id =", "response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_delegate_failure_http_error(self): \"\"\"The function should raise", "exception if the deletion failed. \"\"\" domain_id = 1234 api_url", "from the API.\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url,", "self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.all) # Verify", "domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.approve_delegation) @responses.activate def", "self.assertRaises(HTTPError, domain.activate, domain_id) class TestSuspend(TestDomain): \"\"\"Test the .suspend method.\"\"\" @responses.activate", "information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestCount(TestDomain): \"\"\"Test the .count", "suspension succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup", "setup self.cfixt = self.useFixture(ClientFixture()) self.client = self.cfixt.client self.api_url = f\"{self.cfixt.base_url}/domain/v1\"", "Setup the mocked response domain_id = 1234 location = f\"{self.api_url}/{str(domain_id)}\"", "from .lib.testbase import ClientFixture class TestDomain(TestCase): # pylint: 
disable=too-few-public-methods \"\"\"Serve", "f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.POST, api_url, status=404) domain", "and description if the Domain creation failed with DomainCreationResponseError (Domain", "returned is less than the page size self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.url,", "TestDelegate(TestDomain): \"\"\"Test the .delegate method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The", "\"sub2.example.com\", \"delegations\": [{\"orgId\": org_id, \"certTypes\": types}] } response = domain.create(\"sub2.example.com\",", "} self.assertRaises(DomainCreationResponseError, domain.create, **create_args) class TestDelete(TestDomain): \"\"\"Test the .delete method.\"\"\"", "the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestCount(TestDomain): \"\"\"Test", "def test_create_failure_http_error(self): \"\"\" The function should return an error code", "should raise an HTTPError exception if the specified Domain ID", "json=self.error_response, status=400) domain = Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\",", "should return all the data, but should not query the", "self.assertRaises(TypeError, domain.suspend) @responses.activate def test_suspend_success(self): \"\"\"The function should return True", "-*- \"\"\"Define the cert_manager.domain.Domain unit tests.\"\"\" # Don't warn about", "@responses.activate def test_create_success_optional_params(self): \"\"\" The function should return the created", "\"\"\"Initialize the class.\"\"\" # Call the inherited setUp method super().setUp()", "every permutation of missing parameters, # but verify that something", "TestFind(TestDomain): \"\"\"Test the .find method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without parameters,", "an HTTPError exception if the specified 
Domain ID does not", "TestDomain(TestCase): # pylint: disable=too-few-public-methods \"\"\"Serve as a Base class for", "(Domain ID not found in response). \"\"\" # Setup the", "self.api_url) self.assertEqual(responses.calls[1].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_bad_http(self): \"\"\"The function", "test_delete_success(self): \"\"\"The function should return True if the deletion succeeded.\"\"\"", "class.\"\"\" def setUp(self): # pylint: disable=invalid-name \"\"\"Initialize the class.\"\"\" #", "is created and setup self.cfixt = self.useFixture(ClientFixture()) self.client = self.cfixt.client", "disable=too-few-public-methods \"\"\"Serve as a Base class for all tests of", "responses.add(responses.GET, api_url, json=count, status=200) domain = Domain(client=self.client) data = domain.count(name=\"example.com\")", "@responses.activate def test_domain_id(self): \"\"\"The function should return data about the", "HTTPError exception if the delegation removal failed.\"\"\" domain_id = 1234", "= Domain(client=self.client) response = domain.approve_delegation(domain_id, org_id) post_data = { \"orgId\":", "disable=no-member import json from requests.exceptions import HTTPError from testtools import", "1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.GET,", "the first time \"all\" is called. # Due to pagination,", "domain.find) # Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url,", "domain.create) @responses.activate def test_create_success(self): \"\"\" The function should return the", "add the non-required parameters to the request body \"\"\" #", "delegation removal failed.\"\"\" domain_id = 1234 org_id = 4321 types", "\"description\": \"Example sub domain\" } response = domain.create(\"sub2.example.com\", 4321, [\"SSL\"],", "called. 
# Due to pagination, this is only guaranteed as", "\"not a url\"}, status=201) domain = Domain(client=self.client) create_args = {", "def test_create_failure_domain_id_not_found(self): \"\"\" The function should return an error code", "= domain.reject_delegation(domain_id, org_id) post_data = { \"orgId\": org_id, } self.assertEqual(True,", "domain.activate, domain_id) class TestSuspend(TestDomain): \"\"\"Test the .suspend method.\"\"\" @responses.activate def", "error\"} class TestInit(TestDomain): \"\"\"Test the class initializer.\"\"\" @responses.activate def test_param(self):", "self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_remove_delegation_failure_http_error(self): \"\"\"The function should raise an", "exception if the specified Domain ID does not exist.\"\"\" domain_id", "@responses.activate def test_suspend_success(self): \"\"\"The function should return True if the", "domain.get) @responses.activate def test_domain_id(self): \"\"\"The function should return data about", "= f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.DELETE, api_url, status=200)", "test_reject_delegation_success(self): \"\"\"The function should return True if the rejection succeeded.\"\"\"", "self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_approval_failure_http_error(self): \"\"\"The function should raise an", "failed. 
\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\" # Setup", "response = domain.remove_delegation(domain_id, org_id, types) post_data = { \"orgId\": org_id,", "the .get method.\"\"\" @responses.activate def test_need_domain_id(self): \"\"\"The function should raise", "test_param(self): \"\"\"The URL should change if api_version is passed as", "if the delegation removal succeeded.\"\"\" domain_id = 1234 org_id =", "responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200) api_url = f\"{self.api_url}?name=example.com\" domain = Domain(client=self.client)", "without a client parameter.\"\"\" self.assertRaises(TypeError, Domain) class TestAll(TestDomain): \"\"\"Test the", "the rejection succeeded.\"\"\" domain_id = 1234 org_id = 4321 api_url", "is less than the page size self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url)", "(unexpected HTTP status code). \"\"\" # Setup the mocked response", "responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delegate, domain_id, org_id,", "should raise an HTTPError exception if the delegation removal failed.\"\"\"", "\"\"\"The function should raise an HTTPError exception if the delegation", "self.assertEqual(responses.calls[0].request.url, self.api_url) class TestCount(TestDomain): \"\"\"Test the .count method.\"\"\" @responses.activate def", "pylint: disable=invalid-name \"\"\"Initialize the class.\"\"\" # Call the inherited setUp", "mocked response responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError,", "information self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response[0]) @responses.activate def test_bad_http(self): \"\"\"The function", "types = [\"SSL\"] api_url = f\"{self.api_url}/{str(domain_id)}/delegation\" # 
Setup the mocked", "should return True if the rejection succeeded.\"\"\" domain_id = 1234", ".count method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without parameters, the method will", "should return True if the suspension succeeded.\"\"\" domain_id = 1234", "self.valid_response = [ {\"id\": 1234, \"name\": \"example.com\"}, {\"id\": 4321, \"name\":", "error code and description if the Domain creation failed with", "api_url, status=200) domain = Domain(client=self.client) response = domain.approve_delegation(domain_id, org_id) post_data", "status=200) domain = Domain(client=self.client, api_version=version) data = domain.all() # Verify", "fixture is created and setup self.cfixt = self.useFixture(ClientFixture()) self.client =", "self.valid_response[0] self.valid_individual_response[\"status\"] = \"Active\" # Setup JSON to return in", "response responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delete, domain_id)", "class TestRejectDelegation(TestDomain): \"\"\"Test the .reject_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\"", "= \"Active\" # Setup JSON to return in an error", "Domain(client=self.client) response = domain.delete(domain_id) self.assertEqual(True, response) @responses.activate def test_delete_failure_http_error(self): \"\"\"", "should return True if the delegation removal succeeded.\"\"\" domain_id =", "is passed as a parameter.\"\"\" # Set a new version", "self.assertRaises(HTTPError, domain.get, domain_id) class TestCreate(TestDomain): \"\"\"Test the .create method.\"\"\" @responses.activate", "ID not found in response). 
\"\"\" # Setup the mocked", "domains\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)", "Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data,", "{\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_failure_http_error(self): \"\"\" The function", "domain_id = 1234 org_id = 4321 types = [\"SSL\"] api_url", "= Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.suspend) @responses.activate def test_suspend_success(self):", "1234 location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\": location}, status=201) domain", "called without required parameters. \"\"\" domain = Domain(client=self.client) # missing", "mocked response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client) data", "pylint: disable=protected-access # pylint: disable=no-member import json from requests.exceptions import", "information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestFind(TestDomain): \"\"\"Test the .find", "responses.add(responses.POST, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) create_args = {", "\"\"\"Test the .remove_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function", "status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.count) # Verify all the", "the delegation removal failed.\"\"\" domain_id = 1234 org_id = 4321", "domain ID, as well as add all parameters to the", "def test_delete_success(self): \"\"\"The function should return True if the deletion", "class TestCount(TestDomain): \"\"\"Test 
the .count method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without", "True if the approval succeeded.\"\"\" domain_id = 1234 org_id =", "= Domain(client=self.client) response = domain.remove_delegation(domain_id, org_id, types) post_data = {", "@responses.activate def test_reject_delegation_success(self): \"\"\"The function should return True if the", "responses.add(responses.POST, self.api_url, headers={\"Location\": location}, status=201) domain = Domain(client=self.client) post_data =", "should return data about the specified Domain ID.\"\"\" domain_id =", "[\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_missing_location_header(self): \"\"\" The", "HTTPError exception if the deletion failed. \"\"\" domain_id = 1234", "returned is less than the page size self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url,", "\"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=400)", "from the API.\"\"\" # Setup the mocked response api_url =", "domain.find(name=\"example.com\") # Verify all the query information self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data,", "= [\"SSL\"] api_url = f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response", "# Not going to check every permutation of missing parameters,", "Domain(client=self.client) data = domain.get(domain_id) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_individual_response)", "} self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_reject_failure_http_error(self): \"\"\"The function", "the specified Domain ID.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\"", "1234, 
\"name\": \"example.com\"}, {\"id\": 4321, \"name\": \"*.example.com\"}, {\"id\": 4322, \"name\":", "{ \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": 4321, \"certTypes\": [\"SSL\"]}], \"description\": \"Example", "HTTPError exception if domains cannot be retrieved from the API.\"\"\"", "self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_reject_failure_http_error(self): \"\"\"The function should raise an", "query information # There should only be one call the", "if the Domain creation failed with DomainCreationResponseError (Domain ID not", "DomainCreationResponseError (unexpected HTTP status code). \"\"\" # Setup the mocked", "all parameters to the request body \"\"\" # Setup the", "**create_args) @responses.activate def test_create_failure_missing_location_header(self): \"\"\" The function should return an", "\"name\": \"example.com\"}, {\"id\": 4321, \"name\": \"*.example.com\"}, {\"id\": 4322, \"name\": \"subdomain.example.com\"},", "responses.add(responses.PUT, api_url, status=200) domain = Domain(client=self.client) response = domain.activate(domain_id) self.assertEqual(True,", "def test_params(self): \"\"\"Parameters will be passed to API\"\"\" # Setup", "missing parameters, # but verify that something is required self.assertRaises(TypeError,", "**create_args) @responses.activate def test_create_failure_domain_id_not_found(self): \"\"\" The function should return an", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delete, domain_id) class TestActivate(TestDomain): \"\"\"Test the", "domain = Domain(client=self.client) response = domain.activate(domain_id) self.assertEqual(True, response) @responses.activate def", "API\"\"\" # Setup the mocked response count = {\"count\": len(self.valid_response[0])}", "4321 types = [\"SSL\"] api_url = f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the", "The function should raise an HTTPError exception if the 
suspension", "# Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_response, status=200) domain", "4321 types = [\"SSL\"] location = f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\":", "= Domain(client=self.client) data = domain.all() data = domain.all(force=True) # Verify", "= domain.activate(domain_id) self.assertEqual(True, response) @responses.activate def test_activate_failure_http_error(self): \"\"\" The function", "return all domains\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url,", "function should return an error code and description if the", "# Setup JSON to return in an error self.error_response =", "Setup JSON to return in an error self.error_response = {\"description\":", "response responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.activate, domain_id)", "domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.suspend) @responses.activate def", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.suspend, domain_id) class TestDelegate(TestDomain): \"\"\"Test the .delegate", "than the page size self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(responses.calls[1].request.url, self.api_url)", "domain = Domain(client=self.client) response = domain.suspend(domain_id) self.assertEqual(True, response) @responses.activate def", "self.assertEqual(responses.calls[1].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_bad_http(self): \"\"\"The function should", "response count = {\"count\": len(self.valid_response[0])} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url,", "def test_bad_http(self): \"\"\"The function should raise an HTTPError exception if", "= domain.create(\"sub2.example.com\", 
4321, [\"SSL\"], description=\"Example sub domain\") self.assertEqual(response, {\"id\": domain_id})", "data = domain.all() # Verify all the query information #", "exception without an domain_id parameter.\"\"\" domain = Domain(client=self.client) self.assertRaises(TypeError, domain.get)", "self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_failure_http_error(self): \"\"\" The function should return", "} self.assertRaises(ValueError, domain.create, **create_args) @responses.activate def test_create_failure_http_status_unexpected(self): \"\"\" The function", "tests of the Domain class.\"\"\" def setUp(self): # pylint: disable=invalid-name", "1) self.assertEqual(responses.calls[0].request.url, api_url) class TestGet(TestDomain): \"\"\"Test the .get method.\"\"\" @responses.activate", "failed.\"\"\" domain_id = 1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/reject\"", "domain_id, org_id) class TestRejectDelegation(TestDomain): \"\"\"Test the .reject_delegation method.\"\"\" @responses.activate def", "succeeded.\"\"\" domain_id = 1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/reject\"", "\"\"\"The function should return True if the rejection succeeded.\"\"\" domain_id", "\"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_missing_location_header(self): \"\"\"", "size self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_forced(self):", "= 2345 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response", "mocked response responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.activate,", "Setup the mocked response responses.add(responses.PUT, 
api_url, status=200) domain = Domain(client=self.client)", "Domain creation failed. \"\"\" # Setup the mocked response responses.add(responses.POST,", "domain = Domain(client=self.client) data = domain.count() self.assertEqual(data, count) self.assertEqual(responses.calls[0].request.url, api_url)", "Domain, DomainCreationResponseError from .lib.testbase import ClientFixture class TestDomain(TestCase): # pylint:", "is only guaranteed as long as the number of #", "if the Domain creation failed with DomainCreationResponseError (no Location header", "missing domain_id self.assertRaises(TypeError, domain.delete) @responses.activate def test_delete_success(self): \"\"\"The function should", "import Domain, DomainCreationResponseError from .lib.testbase import ClientFixture class TestDomain(TestCase): #", "responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200) domain = Domain(client=self.client) data = domain.get(domain_id)", "f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the mocked response responses.add(responses.PUT, api_url, status=200) domain", "self.assertRaises(TypeError, domain.approve_delegation) @responses.activate def test_approve_delegation_success(self): \"\"\"The function should return True", "Domain(client=self.client) response = domain.activate(domain_id) self.assertEqual(True, response) @responses.activate def test_activate_failure_http_error(self): \"\"\"", "= Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delete) @responses.activate def test_delete_success(self):", "class TestFind(TestDomain): \"\"\"Test the .find method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without", "if the deletion failed. 
\"\"\" domain_id = 1234 api_url =", "domain.approve_delegation(domain_id, org_id) post_data = { \"orgId\": org_id, } self.assertEqual(True, response)", "as long as the number of # entries returned is", "all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestFind(TestDomain):", "# Setup a test response for getting a specific Domain", "= f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the mocked response responses.add(responses.PUT, api_url, status=200)", "org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_approval_failure_http_error(self): \"\"\"The", "all tests of the Domain class.\"\"\" def setUp(self): # pylint:", "api_url) self.assertEqual(data, self.valid_response) def test_need_client(self): \"\"\"The class should raise an", "**create_args) @responses.activate def test_create_failure_http_status_unexpected(self): \"\"\" The function should return an", "delegation failed.\"\"\" domain_id = 1234 org_id = 4321 types =", "@responses.activate def test_delete_success(self): \"\"\"The function should return True if the", "if the delegation removal failed.\"\"\" domain_id = 1234 org_id =", "an error code and description if the Domain creation failed", "parameters. \"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.remove_delegation)", "the mocked response responses.add(responses.DELETE, api_url, status=200) domain = Domain(client=self.client) response", "one would expect normally self.valid_response = [ {\"id\": 1234, \"name\":", "one call the first time \"all\" is called. 
# Due", "response responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.all)", "\"cert_types\": [\"other\"] } self.assertRaises(ValueError, domain.create, **create_args) @responses.activate def test_create_failure_http_status_unexpected(self): \"\"\"", "the cert_manager.domain.Domain unit tests.\"\"\" # Don't warn about things that", "4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/approve\" # Setup the mocked response responses.add(responses.POST,", "the .create method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should", "and description if the Domain creation failed. \"\"\" # Setup", "test_cached(self): \"\"\"The function should return all the data, but should", "of # entries returned is less than the page size", "api_url = f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.POST, api_url,", "test_params(self): \"\"\"Parameters will be passed to API\"\"\" # Setup the", "test_reject_failure_http_error(self): \"\"\"The function should raise an HTTPError exception if the", "the mocked response responses.add(responses.PUT, api_url, status=200) domain = Domain(client=self.client) response", "# Setup the mocked response responses.add(responses.PUT, api_url, status=404) domain =", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types) class TestRemoveDelegation(TestDomain): \"\"\"Test", "\"\"\"The function should return True if the activation succeeded.\"\"\" domain_id", "the .suspend method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should", "\"\"\"Serve as a Base class for all tests of the", "data = domain.all() # Verify all the query information self.assertEqual(len(responses.calls),", "@responses.activate def test_ne_domain_id(self): \"\"\"The function should raise an HTTPError exception", "created 
domain ID when additional params are specified, as well", "response responses.add(responses.POST, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) create_args =", "domain.all() # Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url,", "{ \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": org_id, \"certTypes\": types}] } response", "failed. \"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response,", "\"*.example.com\"}, {\"id\": 4322, \"name\": \"subdomain.example.com\"}, ] # Setup a test", "\"name\": \"subdomain.example.com\"}, ] # Setup a test response for getting", "all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response)", "= Domain(client=self.client) data = domain.count() self.assertEqual(data, count) self.assertEqual(responses.calls[0].request.url, api_url) @responses.activate", "test_remove_delegation_success(self): \"\"\"The function should return True if the delegation removal", "Not going to check every permutation of missing parameters, #", "\"\"\"Test the .delegate method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function", "Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200) domain =", "sub domain\") self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_failure_http_error(self):", "types}] } response = domain.create(\"sub2.example.com\", org_id, types) self.assertEqual(response, {\"id\": domain_id})", "domain = Domain(client=self.client) response = domain.approve_delegation(domain_id, org_id) post_data = {", "json=self.valid_response[0], status=200) api_url = 
f\"{self.api_url}?name=example.com\" domain = Domain(client=self.client) data =", "api_url, status=200) domain = Domain(client=self.client) response = domain.suspend(domain_id) self.assertEqual(True, response)", "\"certTypes\": types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_remove_delegation_failure_http_error(self):", "def test_suspend_success(self): \"\"\"The function should return True if the suspension", "testtools import TestCase import responses from cert_manager.domain import Domain, DomainCreationResponseError", "specific Domain self.valid_individual_response = self.valid_response[0] self.valid_individual_response[\"status\"] = \"Active\" # Setup", "add all parameters to the request body \"\"\" # Setup", "\"\"\"The function should raise an exception without an domain_id parameter.\"\"\"", "domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.reject_delegation) @responses.activate def", "domain.create(\"sub2.example.com\", org_id, types) self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def", "= Domain(client=self.client) # Not going to check every permutation of", "response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_approval_failure_http_error(self): \"\"\"The function should raise", "api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response responses.add(responses.GET, api_url,", "# pylint: disable=no-member import json from requests.exceptions import HTTPError from", "error self.error_response = {\"description\": \"domain error\"} class TestInit(TestDomain): \"\"\"Test the", "exception when called without required parameters. 
\"\"\" domain = Domain(client=self.client)", "count = {\"count\": len(self.valid_response)} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count,", "domain.reject_delegation) @responses.activate def test_reject_delegation_success(self): \"\"\"The function should return True if", "\"\"\"Test the .activate method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function", "delegation succeeded.\"\"\" domain_id = 1234 org_id = 4321 types =", "HTTPError from testtools import TestCase import responses from cert_manager.domain import", "API\"\"\" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200)", "json=self.valid_response, status=200) domain = Domain(client=self.client, api_version=version) data = domain.all() #", "\"\"\" The function should return the created domain ID, as", "api_url = f\"{self.cfixt.base_url}/domain/{version}\" # Setup the mocked response responses.add(responses.GET, api_url,", "self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) create_args = { \"name\":", "self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client) data = domain.all() data", "status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.activate, domain_id) class TestSuspend(TestDomain): \"\"\"Test", "the delegation removal succeeded.\"\"\" domain_id = 1234 org_id = 4321", "\"\"\"Test the .reject_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function", "the approval succeeded.\"\"\" domain_id = 1234 org_id = 4321 api_url", "domain.get, domain_id) class TestCreate(TestDomain): \"\"\"Test the .create method.\"\"\" @responses.activate def", "\"orgId\": org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_reject_failure_http_error(self):", 
"domain.all() data = domain.all() # Verify all the query information", "\"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"other\"] } self.assertRaises(ValueError, domain.create, **create_args)", "status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types) class", "f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup the mocked response responses.add(responses.POST, api_url, status=404) domain", "True if the delegation succeeded.\"\"\" domain_id = 1234 org_id =", "disable=invalid-name \"\"\"Initialize the class.\"\"\" # Call the inherited setUp method", "= { \"orgId\": org_id, \"certTypes\": types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body,", "unit tests.\"\"\" # Don't warn about things that happen as", "# Verify all the query information self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response[0])", "api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=self.error_response, status=400) domain = Domain(client=self.client)", "Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_response, status=200) domain =", "Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delegate) @responses.activate def test_delegate_success(self): \"\"\"The", "self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_forced(self): \"\"\"The function should return", "the method will return all domains\"\"\" # Setup the mocked", "def test_create_success_optional_params(self): \"\"\" The function should return the created domain", "count all domains\"\"\" # Setup the mocked response count =", "status=200) domain = Domain(client=self.client) response = domain.reject_delegation(domain_id, org_id) post_data =", "the mocked response responses.add(responses.POST, self.api_url, status=201) domain = 
Domain(client=self.client) create_args", "1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" # Setup the mocked response responses.add(responses.PUT,", "4321, \"cert_types\": [\"other\"] } self.assertRaises(ValueError, domain.create, **create_args) @responses.activate def test_create_failure_http_status_unexpected(self):", "entries returned is less than the page size self.assertEqual(len(responses.calls), 1)", "= 1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup", "# Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain", "the .find method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without parameters, the method", "response = domain.activate(domain_id) self.assertEqual(True, response) @responses.activate def test_activate_failure_http_error(self): \"\"\" The", "the number of # entries returned is less than the", "and description if the Domain creation failed with DomainCreationResponseError (no", "f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the mocked response responses.add(responses.PUT, api_url, status=404) domain", "@responses.activate def test_cached(self): \"\"\"The function should return all the data,", "} self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_approval_failure_http_error(self): \"\"\"The function", "the activation succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\" #", "an HTTPError exception if the deletion failed. \"\"\" domain_id =", "responses.add(responses.GET, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.get, domain_id) class", "without required parameters. 
\"\"\" domain = Domain(client=self.client) # missing domain_id", "# Verify all the query information # There should only", "= 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the mocked response", "change if api_version is passed as a parameter.\"\"\" # Set", "HTTPError exception if the delegation failed.\"\"\" domain_id = 1234 org_id", "not exist.\"\"\" domain_id = 2345 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup", "[\"SSL\"], description=\"Example sub domain\") self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate", "= self.valid_response[0] self.valid_individual_response[\"status\"] = \"Active\" # Setup JSON to return", "@responses.activate def test_param(self): \"\"\"The URL should change if api_version is", "self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_forced(self): \"\"\"The function should", "Domain(client=self.client, api_version=version) data = domain.all() # Verify all the query", "\"\"\"Test the .count method.\"\"\" @responses.activate def test_no_params(self): \"\"\"Without parameters, the", "json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.count) # Verify all", "found in response). 
\"\"\" # Setup the mocked response responses.add(responses.POST,", "domain.delegate) @responses.activate def test_delegate_success(self): \"\"\"The function should return True if", "be retrieved from the API.\"\"\" # Setup the mocked response", "the mocked response responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError,", "\"\"\"The function should return True if the approval succeeded.\"\"\" domain_id", "\"\"\"Test the class initializer.\"\"\" @responses.activate def test_param(self): \"\"\"The URL should", "the .activate method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should", "json=count, status=200) domain = Domain(client=self.client) data = domain.count() self.assertEqual(data, count)", "domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.activate) @responses.activate def", "self.assertEqual(True, response) @responses.activate def test_suspend_failure_http_error(self): \"\"\" The function should raise", "the mocked response domain_id = 1234 org_id = 4321 types", "test_suspend_success(self): \"\"\"The function should return True if the suspension succeeded.\"\"\"", "domain = Domain(client=self.client) response = domain.remove_delegation(domain_id, org_id, types) post_data =", "rejection succeeded.\"\"\" domain_id = 1234 org_id = 4321 api_url =", ".remove_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should raise", "part of unit testing # pylint: disable=protected-access # pylint: disable=no-member", "@responses.activate def test_approve_delegation_success(self): \"\"\"The function should return True if the", "parameter.\"\"\" # Set a new version version = \"v3\" api_url", "= Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.remove_delegation) @responses.activate def test_remove_delegation_success(self):", ".suspend method.\"\"\" @responses.activate def 
test_need_params(self): \"\"\" The function should raise", "\"\"\" The function should raise an exception when called without", "org_id, types) self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_create_success_optional_params(self):", "= f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.POST, api_url, status=200)", "org_id, \"certTypes\": types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def", "= Domain(client=self.client) response = domain.reject_delegation(domain_id, org_id) post_data = { \"orgId\":", "if the approval succeeded.\"\"\" domain_id = 1234 org_id = 4321", "# pylint: disable=invalid-name \"\"\"Initialize the class.\"\"\" # Call the inherited", "Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=200) domain =", "domain.get(domain_id) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_individual_response) @responses.activate def test_ne_domain_id(self):", "1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response) def test_need_client(self): \"\"\"The class should", "domain_id) class TestActivate(TestDomain): \"\"\"Test the .activate method.\"\"\" @responses.activate def test_need_params(self):", "required parameters. 
\"\"\" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError,", "api_url = f\"{self.api_url}/{str(domain_id)}/delegation/reject\" # Setup the mocked response responses.add(responses.POST, api_url,", "self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client) data = domain.find() self.assertEqual(data,", "self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_bad_http(self): \"\"\"The function should raise", "getting a specific Domain self.valid_individual_response = self.valid_response[0] self.valid_individual_response[\"status\"] = \"Active\"", "the delegation succeeded.\"\"\" domain_id = 1234 org_id = 4321 types", "if the delegation failed.\"\"\" domain_id = 1234 org_id = 4321", "version = \"v3\" api_url = f\"{self.cfixt.base_url}/domain/{version}\" # Setup the mocked", "cert_manager.domain.Domain unit tests.\"\"\" # Don't warn about things that happen", "response responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.remove_delegation, domain_id,", "= Domain(client=self.client) data = domain.get(domain_id) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data,", "function should raise an HTTPError exception if domains cannot be", "f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count, status=200) domain = Domain(client=self.client) data =", "request body \"\"\" # Setup the mocked response domain_id =", "to API\"\"\" # Setup the mocked response count = {\"count\":", "Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.activate) @responses.activate def test_activate_success(self): \"\"\"The", "@responses.activate def test_no_params(self): \"\"\"Without parameters, the method will count all", "# Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, 
status=400) domain", "types) class TestApproveDelegation(TestDomain): \"\"\"Test the .approve_delegation method.\"\"\" @responses.activate def test_need_params(self):", "api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.suspend, domain_id) class TestDelegate(TestDomain):", "= domain.create(\"sub2.example.com\", org_id, types) self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate", "domain ID when additional params are specified, as well add", "if the delegation succeeded.\"\"\" domain_id = 1234 org_id = 4321", "Domain(client=self.client) response = domain.reject_delegation(domain_id, org_id) post_data = { \"orgId\": org_id,", "a Base class for all tests of the Domain class.\"\"\"", "class TestCreate(TestDomain): \"\"\"Test the .create method.\"\"\" @responses.activate def test_need_params(self): \"\"\"", "be passed to API\"\"\" # Setup the mocked response responses.add(responses.GET,", "failed with DomainCreationResponseError (no Location header in response). 
\"\"\" #", "= Domain(client=self.client) data = domain.find() self.assertEqual(data, self.valid_response) @responses.activate def test_params(self):", "TestCreate(TestDomain): \"\"\"Test the .create method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The", "is required self.assertRaises(TypeError, domain.create) @responses.activate def test_create_success(self): \"\"\" The function", "failed.\"\"\" domain_id = 1234 org_id = 4321 api_url = f\"{self.api_url}/{str(domain_id)}/delegation/approve\"", "self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response[0]) @responses.activate def test_bad_http(self): \"\"\"The function should", "mocked response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.reject_delegation,", "with DomainCreationResponseError (unexpected HTTP status code). \"\"\" # Setup the", "less than the page size self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(data,", "the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) class TestGet(TestDomain): \"\"\"Test", "be one call the first time \"all\" is called. #", "a specific Domain self.valid_individual_response = self.valid_response[0] self.valid_individual_response[\"status\"] = \"Active\" #", "the specified Domain ID does not exist.\"\"\" domain_id = 2345", "response). 
\"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url, status=201)", "domain = Domain(client=self.client) response = domain.delete(domain_id) self.assertEqual(True, response) @responses.activate def", "# missing domain_id self.assertRaises(TypeError, domain.suspend) @responses.activate def test_suspend_success(self): \"\"\"The function", "mocked response responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.remove_delegation,", "= domain.all() # Verify all the query information # There", "missing domain_id self.assertRaises(TypeError, domain.remove_delegation) @responses.activate def test_remove_delegation_success(self): \"\"\"The function should", ".all method.\"\"\" @responses.activate def test_cached(self): \"\"\"The function should return all", "status=201) domain = Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\", \"org_id\":", "post_data = { \"orgId\": org_id, \"certTypes\": types } self.assertEqual(True, response)", "@responses.activate def test_create_failure_domain_id_not_found(self): \"\"\" The function should return an error", "domain.all() # Verify all the query information # There should", "query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestCount(TestDomain): \"\"\"Test the", "raise an exception when called without required parameters. 
\"\"\" domain", "method.\"\"\" @responses.activate def test_need_domain_id(self): \"\"\"The function should raise an exception", "responses.add(responses.DELETE, api_url, status=200) domain = Domain(client=self.client) response = domain.remove_delegation(domain_id, org_id,", "org_id) class TestRejectDelegation(TestDomain): \"\"\"Test the .reject_delegation method.\"\"\" @responses.activate def test_need_params(self):", "status=200) domain = Domain(client=self.client) response = domain.remove_delegation(domain_id, org_id, types) post_data", "# Setup the mocked response responses.add(responses.PUT, api_url, status=200) domain =", "[\"SSL\"]}], \"description\": \"Example sub domain\" } response = domain.create(\"sub2.example.com\", 4321,", "something is required self.assertRaises(TypeError, domain.create) @responses.activate def test_create_success(self): \"\"\" The", "def test_suspend_failure_http_error(self): \"\"\" The function should raise an HTTPError exception", "\"delegations\": [{\"orgId\": 4321, \"certTypes\": [\"SSL\"]}], \"description\": \"Example sub domain\" }", "the query information # There should only be one call", "that is part of unit testing # pylint: disable=protected-access #", "in response). 
\"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url,", "test_create_failure_missing_location_header(self): \"\"\" The function should return an error code and", "headers={\"Location\": location}, status=201) domain = Domain(client=self.client) post_data = { \"name\":", "status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.find) # Verify all the", "succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/activate\" # Setup the", "self.client = self.cfixt.client self.api_url = f\"{self.cfixt.base_url}/domain/v1\" # Setup a test", "responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id)", "api_url, status=200) domain = Domain(client=self.client) response = domain.delete(domain_id) self.assertEqual(True, response)", "self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_delegate_failure_http_error(self): \"\"\"The function should raise an", "method super().setUp() # Make sure the Client fixture is created", "Domain(client=self.client) self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id) class TestRejectDelegation(TestDomain): \"\"\"Test the .reject_delegation", "DomainCreationResponseError (Domain ID not found in response). 
\"\"\" # Setup", "params are specified, as well add the non-required parameters to", "# Setup the mocked response domain_id = 1234 location =", "response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.approve_delegation, domain_id,", "domain.activate) @responses.activate def test_activate_success(self): \"\"\"The function should return True if", "\"\"\"Test the .suspend method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function", "information self.assertEqual(responses.calls[0].request.url, f\"{api_url}?name=example.com\") self.assertEqual(data, count) @responses.activate def test_bad_http(self): \"\"\"The function", "Domain creation failed with DomainCreationResponseError (Domain ID not found in", "= domain.remove_delegation(domain_id, org_id, types) post_data = { \"orgId\": org_id, \"certTypes\":", "json=count, status=200) domain = Domain(client=self.client) data = domain.count(name=\"example.com\") # Verify", "json=self.error_response, status=200) domain = Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\",", "information # There should only be one call the first", "with DomainCreationResponseError (Domain ID not found in response). 
\"\"\" #", "but should not query the API twice.\"\"\" # Setup the", "ID.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the", "} self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_delegate_failure_http_error(self): \"\"\"The function", "domains\"\"\" # Setup the mocked response count = {\"count\": len(self.valid_response)}", "Setup a test response for getting a specific Domain self.valid_individual_response", "= domain.count(name=\"example.com\") # Verify all the query information self.assertEqual(responses.calls[0].request.url, f\"{api_url}?name=example.com\")", "client parameter.\"\"\" self.assertRaises(TypeError, Domain) class TestAll(TestDomain): \"\"\"Test the .all method.\"\"\"", "{ \"orgId\": org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def", "an exception without an domain_id parameter.\"\"\" domain = Domain(client=self.client) self.assertRaises(TypeError,", "domain.reject_delegation(domain_id, org_id) post_data = { \"orgId\": org_id, } self.assertEqual(True, response)", "Domain ID does not exist.\"\"\" domain_id = 2345 api_url =", "@responses.activate def test_create_failure_http_error(self): \"\"\" The function should return an error", "self.valid_response[0]) @responses.activate def test_bad_http(self): \"\"\"The function should raise an HTTPError", "missing domain_id self.assertRaises(TypeError, domain.reject_delegation) @responses.activate def test_reject_delegation_success(self): \"\"\"The function should", "raise an exception without a client parameter.\"\"\" self.assertRaises(TypeError, Domain) class", "def setUp(self): # pylint: disable=invalid-name \"\"\"Initialize the class.\"\"\" # Call", "status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.all) # Verify all the", "the 
.remove_delegation method.\"\"\" @responses.activate def test_need_params(self): \"\"\" The function should", "{\"count\": len(self.valid_response)} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count, status=200) domain", "domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delete) @responses.activate def", "= 1234 org_id = 4321 types = [\"SSL\"] location =", "response responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.find)", "that something is required self.assertRaises(TypeError, domain.create) @responses.activate def test_create_success(self): \"\"\"", "org_id, types) class TestRemoveDelegation(TestDomain): \"\"\"Test the .remove_delegation method.\"\"\" @responses.activate def", "self.assertEqual(data, self.valid_response) @responses.activate def test_bad_http(self): \"\"\"The function should raise an", "response responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response = domain.reject_delegation(domain_id,", "class TestSuspend(TestDomain): \"\"\"Test the .suspend method.\"\"\" @responses.activate def test_need_params(self): \"\"\"", "failed with DomainCreationResponseError (unexpected HTTP status code). 
\"\"\" # Setup", "the suspension succeeded.\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}/suspend\" #", "= { \"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError,", "# Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url)", "responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.find) #", "domain.approve_delegation, domain_id, org_id) class TestRejectDelegation(TestDomain): \"\"\"Test the .reject_delegation method.\"\"\" @responses.activate", "domain = Domain(client=self.client) data = domain.count(name=\"example.com\") # Verify all the", "function should return True if the delegation succeeded.\"\"\" domain_id =", "function should raise an HTTPError exception if the delegation failed.\"\"\"", "if the specified Domain ID does not exist.\"\"\" domain_id =", "Call the inherited setUp method super().setUp() # Make sure the", "Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=400) domain =", "{\"id\": 4321, \"name\": \"*.example.com\"}, {\"id\": 4322, \"name\": \"subdomain.example.com\"}, ] #", "\"\"\"The URL should change if api_version is passed as a", "api_url, json=self.valid_individual_response, status=200) domain = Domain(client=self.client) data = domain.get(domain_id) self.assertEqual(len(responses.calls),", "mocked response count = {\"count\": len(self.valid_response)} api_url = f\"{self.api_url}/count\" responses.add(responses.GET,", "should return the created domain ID, as well as add", "raise an HTTPError exception if counts cannot be retrieved from", "import HTTPError from testtools import TestCase import responses from cert_manager.domain", "the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, 
status=400) domain = Domain(client=self.client)", "# Setup the mocked response responses.add(responses.POST, self.api_url, status=201) domain =", "\"domain error\"} class TestInit(TestDomain): \"\"\"Test the class initializer.\"\"\" @responses.activate def", "an domain_id parameter.\"\"\" domain = Domain(client=self.client) self.assertRaises(TypeError, domain.get) @responses.activate def", "to pagination, this is only guaranteed as long as the", "header in response). \"\"\" # Setup the mocked response responses.add(responses.POST,", "@responses.activate def test_create_failure_missing_location_header(self): \"\"\" The function should return an error", "f\"{self.api_url}?name=example.com\" domain = Domain(client=self.client) data = domain.find(name=\"example.com\") # Verify all", "[ {\"id\": 1234, \"name\": \"example.com\"}, {\"id\": 4321, \"name\": \"*.example.com\"}, {\"id\":", "domain_id) class TestCreate(TestDomain): \"\"\"Test the .create method.\"\"\" @responses.activate def test_need_params(self):", "the non-required parameters to the request body \"\"\" # Setup", "the mocked response responses.add(responses.GET, api_url, json=self.valid_response, status=200) domain = Domain(client=self.client,", "\"\"\"Define the cert_manager.domain.Domain unit tests.\"\"\" # Don't warn about things", "exception if domains cannot be retrieved from the API.\"\"\" #", "an error code and description if the Domain creation failed.", "\"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"SSL\"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate", "def test_no_params(self): \"\"\"Without parameters, the method will return all domains\"\"\"", "\"org_id\": 4321, \"cert_types\": [\"other\"] } self.assertRaises(ValueError, domain.create, **create_args) @responses.activate def", "\"\"\"The function should raise an HTTPError exception if counts cannot", "\"\"\"The function should return True if the delegation removal 
succeeded.\"\"\"", "self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response) def test_need_client(self): \"\"\"The class", "domain.create, **create_args) @responses.activate def test_create_failure_domain_id_not_found(self): \"\"\" The function should return", "domain.activate(domain_id) self.assertEqual(True, response) @responses.activate def test_activate_failure_http_error(self): \"\"\" The function should", "creation failed. \"\"\" # Setup the mocked response responses.add(responses.POST, self.api_url,", "HTTPError exception if the suspension failed. \"\"\" domain_id = 1234", "description if the Domain creation failed with DomainCreationResponseError (no Location", "should raise an HTTPError exception if the deletion failed. \"\"\"", "Domain(client=self.client) self.assertRaises(HTTPError, domain.get, domain_id) class TestCreate(TestDomain): \"\"\"Test the .create method.\"\"\"", "# Setup the mocked response api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url,", "# pylint: disable=too-few-public-methods \"\"\"Serve as a Base class for all", "mocked response responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.suspend,", "class TestDelete(TestDomain): \"\"\"Test the .delete method.\"\"\" @responses.activate def test_need_params(self): \"\"\"", "domain = Domain(client=self.client) # Not going to check every permutation", "additional params are specified, as well add the non-required parameters", "Domain(client=self.client) create_args = { \"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"SSL\"]", "deletion failed. 
\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" #", "pagination, this is only guaranteed as long as the number", "the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client)", "mocked response responses.add(responses.GET, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.get,", "missing domain_id self.assertRaises(TypeError, domain.delegate) @responses.activate def test_delegate_success(self): \"\"\"The function should", "domain.delete, domain_id) class TestActivate(TestDomain): \"\"\"Test the .activate method.\"\"\" @responses.activate def", "= domain.count() self.assertEqual(data, count) self.assertEqual(responses.calls[0].request.url, api_url) @responses.activate def test_params(self): \"\"\"Parameters", "Domain class.\"\"\" def setUp(self): # pylint: disable=invalid-name \"\"\"Initialize the class.\"\"\"", "domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id) class TestRejectDelegation(TestDomain): \"\"\"Test", "Verify all the query information # There should only be", "\"\"\"The function should return True if the deletion succeeded.\"\"\" domain_id", "f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.DELETE, api_url, status=200) domain", "= Domain(client=self.client) self.assertRaises(HTTPError, domain.activate, domain_id) class TestSuspend(TestDomain): \"\"\"Test the .suspend", "import ClientFixture class TestDomain(TestCase): # pylint: disable=too-few-public-methods \"\"\"Serve as a", "status=200) api_url = f\"{self.api_url}?name=example.com\" domain = Domain(client=self.client) data = domain.find(name=\"example.com\")", "exception without a client parameter.\"\"\" self.assertRaises(TypeError, Domain) class TestAll(TestDomain): \"\"\"Test", "1) self.assertEqual(responses.calls[0].request.url, self.api_url) class 
TestCount(TestDomain): \"\"\"Test the .count method.\"\"\" @responses.activate", "\"orgId\": org_id, \"certTypes\": types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate", "URL should change if api_version is passed as a parameter.\"\"\"", "json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.all) # Verify all", "guaranteed as long as the number of # entries returned", "responses.add(responses.POST, self.api_url, headers={\"Location\": \"not a url\"}, status=201) domain = Domain(client=self.client)", "failed. \"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup", "# Setup the mocked response responses.add(responses.POST, self.api_url, headers={\"Location\": \"not a", "response responses.add(responses.GET, api_url, json=self.valid_response, status=200) domain = Domain(client=self.client, api_version=version) data", "domain.count() self.assertEqual(data, count) self.assertEqual(responses.calls[0].request.url, api_url) @responses.activate def test_params(self): \"\"\"Parameters will", "the mocked response api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=self.error_response, status=400)", "f\"{self.api_url}/{str(domain_id)}\" responses.add(responses.POST, self.api_url, headers={\"Location\": location}, status=201) domain = Domain(client=self.client) post_data", "Domain(client=self.client) data = domain.find() self.assertEqual(data, self.valid_response) @responses.activate def test_params(self): \"\"\"Parameters", "= \"v3\" api_url = f\"{self.cfixt.base_url}/domain/{version}\" # Setup the mocked response", "api_url = f\"{self.api_url}?name=example.com\" domain = Domain(client=self.client) data = domain.find(name=\"example.com\") #", "domain.suspend) @responses.activate def test_suspend_success(self): \"\"\"The function should return True if", "4321, \"certTypes\": 
[\"SSL\"]}], \"description\": \"Example sub domain\" } response =", "domain.remove_delegation, domain_id, org_id, types) class TestApproveDelegation(TestDomain): \"\"\"Test the .approve_delegation method.\"\"\"", "Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.approve_delegation) @responses.activate def test_approve_delegation_success(self): \"\"\"The", "@responses.activate def test_activate_failure_http_error(self): \"\"\" The function should raise an HTTPError", "count = {\"count\": len(self.valid_response[0])} api_url = f\"{self.api_url}/count\" responses.add(responses.GET, api_url, json=count,", "response responses.add(responses.PUT, api_url, status=200) domain = Domain(client=self.client) response = domain.activate(domain_id)", "class initializer.\"\"\" @responses.activate def test_param(self): \"\"\"The URL should change if", "response responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200) api_url = f\"{self.api_url}?name=example.com\" domain =", "def test_need_domain_id(self): \"\"\"The function should raise an exception without an", "self.api_url) class TestCount(TestDomain): \"\"\"Test the .count method.\"\"\" @responses.activate def test_no_params(self):", "create_args = { \"name\": \"sub2.example.com\", \"org_id\": 4321, \"cert_types\": [\"SSL\"] }", "test_approve_delegation_success(self): \"\"\"The function should return True if the approval succeeded.\"\"\"", "= [ {\"id\": 1234, \"name\": \"example.com\"}, {\"id\": 4321, \"name\": \"*.example.com\"},", "verify that something is required self.assertRaises(TypeError, domain.create) @responses.activate def test_create_success(self):", "Domain(client=self.client) response = domain.delegate(domain_id, org_id, types) post_data = { \"orgId\":", "# Setup the mocked response responses.add(responses.DELETE, api_url, status=404) domain =", "self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, 
json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_approval_failure_http_error(self): \"\"\"The function should", "self.api_url, json=self.valid_response[0], status=200) api_url = f\"{self.api_url}?name=example.com\" domain = Domain(client=self.client) data", "domain.suspend, domain_id) class TestDelegate(TestDomain): \"\"\"Test the .delegate method.\"\"\" @responses.activate def", "\"\"\" domain_id = 1234 api_url = f\"{self.api_url}/{str(domain_id)}\" # Setup the", "the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestFind(TestDomain): \"\"\"Test", "responses.add(responses.GET, api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.count) #", "test_create_success_optional_params(self): \"\"\" The function should return the created domain ID", "should change if api_version is passed as a parameter.\"\"\" #", "counts cannot be retrieved from the API.\"\"\" # Setup the", "test_remove_delegation_failure_http_error(self): \"\"\"The function should raise an HTTPError exception if the", "api_url, json=count, status=200) domain = Domain(client=self.client) data = domain.count() self.assertEqual(data,", "the Domain creation failed with DomainCreationResponseError (no Location header in", "removal failed.\"\"\" domain_id = 1234 org_id = 4321 types =", "post_data = { \"name\": \"sub2.example.com\", \"delegations\": [{\"orgId\": 4321, \"certTypes\": [\"SSL\"]}],", "response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\")) @responses.activate def test_remove_delegation_failure_http_error(self): \"\"\"The function should raise", "self.valid_individual_response = self.valid_response[0] self.valid_individual_response[\"status\"] = \"Active\" # Setup JSON to", "def test_delegate_success(self): \"\"\"The function should return True if the delegation", "all the query information 
self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestCount(TestDomain):", "4321, [\"SSL\"], description=\"Example sub domain\") self.assertEqual(response, {\"id\": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode(\"utf8\"))", "warn about things that happen as that is part of", "missing domain_id self.assertRaises(TypeError, domain.suspend) @responses.activate def test_suspend_success(self): \"\"\"The function should", "domain = Domain(client=self.client) response = domain.delegate(domain_id, org_id, types) post_data =", "that happen as that is part of unit testing #", "= f\"{self.api_url}/{str(domain_id)}/delegation\" # Setup the mocked response responses.add(responses.DELETE, api_url, status=200)", "api_url) self.assertEqual(data, self.valid_individual_response) @responses.activate def test_ne_domain_id(self): \"\"\"The function should raise" ]
[ "'----------------------------------\\n'\\ '1 - да\\n' \\ '2 - нет' SUCCESS_STEP =", "'отмечены 10 островов. На каждом из островов \\n' \\ 'зарыт", "''Сундук сокровищ''!\\n' \\ 'Попробуй себя в роли капитана корабля, собери", "остались монеты, то можешь \\n' \\ 'попробовать организовать поход заново.", "капитана корабля, собери ' \\ 'команду и достань все сокровища!'", "А дядюшка в тебя верил! \\n' \\ 'Конец игры.' NAMES", "игры - добыть все сокровища и скопить как можно больше", "<reponame>ProtKsen/pgame \"\"\"Text parts.\"\"\" SEPARATOR = '----------------------------------' CONT_GAME = 'enter для", "остров тебе понадобится \\n' \\ 'команда, а нанять ее ты", "поход заново. Удачи!' WINNING = 'Поздравляю! Ты собрал сокровища со", "\\ '----------------------------------\\n'\\ '1 - я передумал, буду сам себе оракул!", "Ты смог достать спрятанное сокровище! \\n' \\ 'Самое время готовиться", "= 'enter для продолжения игры' GREETING = 'Добро пожаловать в", "'Степан', 'Грозный Глаз', 'Гарри', 'Моррис', 'Джек', 'Алекс', 'Сэм', 'Том', 'Янис',", "= ['Боб', 'Ричард', 'Алан', 'Степан', 'Грозный Глаз', 'Гарри', 'Моррис', 'Джек',", "'Добро пожаловать в игру ''Сундук сокровищ''!\\n' \\ 'Попробуй себя в", "\\ '----------------------------------\\n'\\ '1 - да\\n' \\ '2 - нет' SUCCESS_STEP", "'Поздравляю! Ты смог достать спрятанное сокровище! \\n' \\ 'Самое время", "сколько очков логики должно быть у команды? (1 монета) \\n'\\", "тебе неизвестна. !!!' ORACLE_QUESTION = 'Здесь неподалеку живет известный оракул.", "тебя еще остались монеты, то можешь \\n' \\ 'попробовать организовать", "'1 - да\\n' \\ '2 - нет' SUCCESS_STEP = 'Поздравляю!", "дядюшки тебе достался корабль, \\n' \\ 'несколько золотых монет и", "ORACLE_QUESTION_2 = 'Что ты хочешь узнать у оракула? \\n' \\", "тем труднее его получить. \\n\\n' \\ 'Цель игры - добыть", "на островах. 
\\n' \\ '1 - легко \\n' \\ '2", "он сможет предсказать с какой ловушкой\\n' \\ 'ты столкнешься на", "Чем больше \\n' \\ 'порядковый номер острова, тем ценнее хранящееся", "'Цель игры - добыть все сокровища и скопить как можно", "'Конец игры.' LOSING = 'Сожалею, ты потратил все деньги. Карьера", "ловушкой\\n' \\ 'ты столкнешься на острове. Пойдешь ли ты к", "\\n' \\ 'островов, можешь выкинуть ненужную теперь карту) \\n' \\", "'на нем сокровище и тем труднее его получить. \\n\\n' \\", "\\n' \\ 'Конец игры.' LOSING = 'Сожалею, ты потратил все", "у оракула? \\n' \\ '----------------------------------\\n'\\ '1 - я передумал, буду", "добыть все сокровища и скопить как можно больше монет. \\n\\n'", "\\ '3 - тяжело' INTRODUCTION = 'В наследство от дядюшки", "больше \\n' \\ 'порядковый номер острова, тем ценнее хранящееся \\n'", "'В наследство от дядюшки тебе достался корабль, \\n' \\ 'несколько", "организовать поход заново. Удачи!' WINNING = 'Поздравляю! Ты собрал сокровища", "= '----------------------------------' CONT_GAME = 'enter для продолжения игры' GREETING =", "= 'Добро пожаловать в игру ''Сундук сокровищ''!\\n' \\ 'Попробуй себя", "\\ 'порядковый номер острова, тем ценнее хранящееся \\n' \\ 'на", "все сокровища!' NAME_QUESTION = 'Как тебя зовут?' CHOOSE_LEVEL = 'Выбери", "у команды? (1 монета) \\n'\\ '3 - сколько очков силы", "\\ 'отмечены 10 островов. На каждом из островов \\n' \\", "- легко \\n' \\ '2 - средне \\n' \\ '3", "скопить как можно больше монет. \\n\\n' \\ 'Команда твоего корабля", "\\n' \\ 'несколько золотых монет и карта, на которой \\n'", "10 островов. На каждом из островов \\n' \\ 'зарыт клад.", "быть у команды? (1 монета)' GO_TAVERN_TEXT = 'Отлично! Для похода", "на острове. Пойдешь ли ты к нему?\\n' \\ '----------------------------------\\n'\\ '1", "спрятанное сокровище! \\n' \\ 'Самое время готовиться к следующему походу.'", "NAME_QUESTION = 'Как тебя зовут?' 
CHOOSE_LEVEL = 'Выбери уровень сложности,", "ORACLE_QUESTION = 'Здесь неподалеку живет известный оракул. За определенную\\n' \\", "определенную\\n' \\ 'плату он сможет предсказать с какой ловушкой\\n' \\", "CHOOSE_LEVEL = 'Выбери уровень сложности, он влияет на стоимость '", "и тем труднее его получить. \\n\\n' \\ 'Цель игры -", "'Что ты хочешь узнать у оракула? \\n' \\ '----------------------------------\\n'\\ '1", "- нет, сам разберусь' ORACLE_QUESTION_1 = 'Что ты хочешь узнать", "'капитана подошла к концу. А дядюшка в тебя верил! \\n'", "себя в роли капитана корабля, собери ' \\ 'команду и", "хочешь узнать у оракула? \\n' \\ '----------------------------------\\n'\\ '1 - я", "\\n'\\ '2 - сколько очков логики должно быть у команды?", "его получить. \\n\\n' \\ 'Цель игры - добыть все сокровища", "INTRODUCTION = 'В наследство от дядюшки тебе достался корабль, \\n'", "ее ты сможешь в таверне.' EXIT_QUESTION = 'Продолжить игру?\\n' \\", "игру?\\n' \\ '----------------------------------\\n'\\ '1 - да\\n' \\ '2 - нет'", "WINNING = 'Поздравляю! Ты собрал сокровища со всех окрестных \\n'", "\\n\\n' \\ '!!! Сумма всех требуемых очков равна номеру острова,\\n'", "монета) \\n'\\ '4 - сколько очков ловкости должно быть у", "Для похода на остров тебе понадобится \\n' \\ 'команда, а", "смог достать сокровище. \\n' \\ 'Если у тебя еще остались", "тебя зовут?' CHOOSE_LEVEL = 'Выбери уровень сложности, он влияет на", "силы должно быть у команды? (1 монета) \\n'\\ '4 -", "подошла к концу. А дядюшка в тебя верил! \\n' \\", "с какой ловушкой\\n' \\ 'ты столкнешься на острове. Пойдешь ли", "\\n' \\ '3 - тяжело' INTRODUCTION = 'В наследство от", "сам разберусь' ORACLE_QUESTION_1 = 'Что ты хочешь узнать у оракула?", "\\ 'ты столкнешься на острове. Пойдешь ли ты к нему?\\n'", "\\ 'несколько золотых монет и карта, на которой \\n' \\", "'2 - нет' SUCCESS_STEP = 'Поздравляю! 
Ты смог достать спрятанное", "- узнать все требуемые характеристики (3 монеты)' ORACLE_QUESTION_2 = 'Что", "ты сможешь в таверне.' EXIT_QUESTION = 'Продолжить игру?\\n' \\ '----------------------------------\\n'\\", "выкинуть ненужную теперь карту) \\n' \\ 'Конец игры.' LOSING =", "\\ 'Конец игры.' NAMES = ['Боб', 'Ричард', 'Алан', 'Степан', 'Грозный", "(1 монета)' GO_TAVERN_TEXT = 'Отлично! Для похода на остров тебе", "если будет иметь нужное количество очков \\n' \\ 'логики, силы", "на остров тебе понадобится \\n' \\ 'команда, а нанять ее", "сокровище! \\n' \\ 'Самое время готовиться к следующему походу.' FAILURE_STEP", "узнать все требуемые характеристики (3 монеты)' ORACLE_QUESTION_2 = 'Что ты", "а нанять ее ты сможешь в таверне.' EXIT_QUESTION = 'Продолжить", "\\ 'Конец игры.' LOSING = 'Сожалею, ты потратил все деньги.", "себе оракул! \\n'\\ '2 - сколько очков логики должно быть", "живет известный оракул. За определенную\\n' \\ 'плату он сможет предсказать", "ты потратил все деньги. Карьера пиратского \\n' \\ 'капитана подошла", "- сколько очков логики должно быть у команды? (1 монета)", "все требуемые характеристики (3 монеты)' ORACLE_QUESTION_2 = 'Что ты хочешь", "и ловкости. \\n\\n' \\ '!!! Сумма всех требуемых очков равна", "в игру ''Сундук сокровищ''!\\n' \\ 'Попробуй себя в роли капитана", "сокровище и тем труднее его получить. \\n\\n' \\ 'Цель игры", "- сколько очков силы должно быть у команды? (1 монета)", "игры.' LOSING = 'Сожалею, ты потратил все деньги. Карьера пиратского", "'ты столкнешься на острове. Пойдешь ли ты к нему?\\n' \\", "'1 - я передумал, буду сам себе оракул! \\n'\\ '2", "средне \\n' \\ '3 - тяжело' INTRODUCTION = 'В наследство", "\\n'\\ '3 - сколько очков силы должно быть у команды?", "понадобится \\n' \\ 'команда, а нанять ее ты сможешь в", "\\ 'но точная комбинация тебе неизвестна. !!!' ORACLE_QUESTION = 'Здесь", "игры.' NAMES = ['Боб', 'Ричард', 'Алан', 'Степан', 'Грозный Глаз', 'Гарри',", "острове. 
Пойдешь ли ты к нему?\\n' \\ '----------------------------------\\n'\\ '1 -", "сколько очков силы должно быть у команды? (1 монета) \\n'\\", "'Выбери уровень сложности, он влияет на стоимость ' \\ 'сокровищ", "монета)' GO_TAVERN_TEXT = 'Отлично! Для похода на остров тебе понадобится", "к концу. А дядюшка в тебя верил! \\n' \\ 'Конец", "LOSING = 'Сожалею, ты потратил все деньги. Карьера пиратского \\n'", "количество очков \\n' \\ 'логики, силы и ловкости. \\n\\n' \\", "твоего корабля сможет обезвредить ловушку, \\n' \\ 'только если будет", "' \\ 'сокровищ на островах. \\n' \\ '1 - легко", "'Отлично! Для похода на остров тебе понадобится \\n' \\ 'команда,", "\\n' \\ 'на нем сокровище и тем труднее его получить.", "ловушку. Чем больше \\n' \\ 'порядковый номер острова, тем ценнее", "'Алан', 'Степан', 'Грозный Глаз', 'Гарри', 'Моррис', 'Джек', 'Алекс', 'Сэм', 'Том',", "\\ 'Цель игры - добыть все сокровища и скопить как", "ORACLE_QUESTION_1 = 'Что ты хочешь узнать у оракула? \\n' \\", "у тебя еще остались монеты, то можешь \\n' \\ 'попробовать", "должно быть у команды? (1 монета) \\n'\\ '4 - сколько", "'но точная комбинация тебе неизвестна. !!!' ORACLE_QUESTION = 'Здесь неподалеку", "\\n' \\ 'зарыт клад. Но для того, чтобы достать его,", "достать спрятанное сокровище! \\n' \\ 'Самое время готовиться к следующему", "в тебя верил! \\n' \\ 'Конец игры.' NAMES = ['Боб',", "как можно больше монет. \\n\\n' \\ 'Команда твоего корабля сможет", "передумал, буду сам себе оракул! \\n'\\ '2 - сколько очков", "'зарыт клад. Но для того, чтобы достать его, \\n' \\", "- да\\n' \\ '2 - нет' SUCCESS_STEP = 'Поздравляю! Ты", "чтобы достать его, \\n' \\ 'необходимо обезвредить ловушку. Чем больше", "'логики, силы и ловкости. \\n\\n' \\ '!!! Сумма всех требуемых", "получить. \\n\\n' \\ 'Цель игры - добыть все сокровища и", "да\\n' \\ '2 - нет' SUCCESS_STEP = 'Поздравляю! Ты смог", "можно больше монет. \\n\\n' \\ 'Команда твоего корабля сможет обезвредить", "известный оракул. 
За определенную\\n' \\ 'плату он сможет предсказать с", "к следующему походу.' FAILURE_STEP = 'К сожалению, ты не смог", "требуемых очков равна номеру острова,\\n' \\ 'но точная комбинация тебе", "\\ 'островов, можешь выкинуть ненужную теперь карту) \\n' \\ 'Конец", "= 'К сожалению, ты не смог достать сокровище. \\n' \\", "нужное количество очков \\n' \\ 'логики, силы и ловкости. \\n\\n'", "тебе достался корабль, \\n' \\ 'несколько золотых монет и карта,", "'Моррис', 'Джек', 'Алекс', 'Сэм', 'Том', 'Янис', 'Геральт', 'Ринсвинд', 'Купер', 'Борис',", "\\ 'на нем сокровище и тем труднее его получить. \\n\\n'", "обезвредить ловушку, \\n' \\ 'только если будет иметь нужное количество", "смог достать спрятанное сокровище! \\n' \\ 'Самое время готовиться к", "монета) \\n'\\ '3 - сколько очков силы должно быть у", "со всех окрестных \\n' \\ 'островов, можешь выкинуть ненужную теперь", "быть у команды? (1 монета) \\n'\\ '3 - сколько очков", "'2 - сколько очков логики должно быть у команды? (1", "очков ловкости должно быть у команды? (1 монета)' GO_TAVERN_TEXT =", "влияет на стоимость ' \\ 'сокровищ на островах. \\n' \\", "\\n' \\ '1 - легко \\n' \\ '2 - средне", "'Здесь неподалеку живет известный оракул. За определенную\\n' \\ 'плату он", "иметь нужное количество очков \\n' \\ 'логики, силы и ловкости.", "= 'Что ты хочешь узнать у оракула? \\n' \\ '----------------------------------\\n'\\", "которой \\n' \\ 'отмечены 10 островов. На каждом из островов", "островов \\n' \\ 'зарыт клад. Но для того, чтобы достать", "\\ '----------------------------------\\n'\\ '1 - да, пойду\\n' \\ '2 - нет,", "'enter для продолжения игры' GREETING = 'Добро пожаловать в игру", "- сколько очков ловкости должно быть у команды? (1 монета)", "'1 - легко \\n' \\ '2 - средне \\n' \\", "карта, на которой \\n' \\ 'отмечены 10 островов. На каждом", "тебе понадобится \\n' \\ 'команда, а нанять ее ты сможешь", "SUCCESS_STEP = 'Поздравляю! Ты смог достать спрятанное сокровище! 
\\n' \\", "труднее его получить. \\n\\n' \\ 'Цель игры - добыть все", "время готовиться к следующему походу.' FAILURE_STEP = 'К сожалению, ты", "клад. Но для того, чтобы достать его, \\n' \\ 'необходимо", "\\ 'попробовать организовать поход заново. Удачи!' WINNING = 'Поздравляю! Ты", "Удачи!' WINNING = 'Поздравляю! Ты собрал сокровища со всех окрестных", "FAILURE_STEP = 'К сожалению, ты не смог достать сокровище. \\n'", "продолжения игры' GREETING = 'Добро пожаловать в игру ''Сундук сокровищ''!\\n'", "легко \\n' \\ '2 - средне \\n' \\ '3 -", "должно быть у команды? (1 монета)' GO_TAVERN_TEXT = 'Отлично! Для", "нанять ее ты сможешь в таверне.' EXIT_QUESTION = 'Продолжить игру?\\n'", "\\n'\\ '4 - сколько очков ловкости должно быть у команды?", "для продолжения игры' GREETING = 'Добро пожаловать в игру ''Сундук", "окрестных \\n' \\ 'островов, можешь выкинуть ненужную теперь карту) \\n'", "'Поздравляю! Ты собрал сокровища со всех окрестных \\n' \\ 'островов,", "в таверне.' EXIT_QUESTION = 'Продолжить игру?\\n' \\ '----------------------------------\\n'\\ '1 -", "ловкости. \\n\\n' \\ '!!! Сумма всех требуемых очков равна номеру", "Сумма всех требуемых очков равна номеру острова,\\n' \\ 'но точная", "'островов, можешь выкинуть ненужную теперь карту) \\n' \\ 'Конец игры.'", "\\n' \\ 'Если у тебя еще остались монеты, то можешь", "пойду\\n' \\ '2 - нет, сам разберусь' ORACLE_QUESTION_1 = 'Что", "= 'Продолжить игру?\\n' \\ '----------------------------------\\n'\\ '1 - да\\n' \\ '2", "походу.' FAILURE_STEP = 'К сожалению, ты не смог достать сокровище.", "\\ 'необходимо обезвредить ловушку. Чем больше \\n' \\ 'порядковый номер", "и скопить как можно больше монет. \\n\\n' \\ 'Команда твоего", "ловкости должно быть у команды? (1 монета) \\n'\\ '5 -", "'Если у тебя еще остались монеты, то можешь \\n' \\", "GREETING = 'Добро пожаловать в игру ''Сундук сокровищ''!\\n' \\ 'Попробуй", "ты не смог достать сокровище. \\n' \\ 'Если у тебя", "= 'Отлично! 
Для похода на остров тебе понадобится \\n' \\", "'Джек', 'Алекс', 'Сэм', 'Том', 'Янис', 'Геральт', 'Ринсвинд', 'Купер', 'Борис', 'Джон',", "сможет обезвредить ловушку, \\n' \\ 'только если будет иметь нужное", "\\ '2 - нет, сам разберусь' ORACLE_QUESTION_1 = 'Что ты", "нет' SUCCESS_STEP = 'Поздравляю! Ты смог достать спрятанное сокровище! \\n'", "больше монет. \\n\\n' \\ 'Команда твоего корабля сможет обезвредить ловушку,", "ты хочешь узнать у оракула? \\n' \\ '----------------------------------\\n'\\ '1 -", "'порядковый номер острова, тем ценнее хранящееся \\n' \\ 'на нем", "монет. \\n\\n' \\ 'Команда твоего корабля сможет обезвредить ловушку, \\n'", "'только если будет иметь нужное количество очков \\n' \\ 'логики,", "\\ 'сокровищ на островах. \\n' \\ '1 - легко \\n'", "parts.\"\"\" SEPARATOR = '----------------------------------' CONT_GAME = 'enter для продолжения игры'", "= 'Выбери уровень сложности, он влияет на стоимость ' \\", "EXIT_QUESTION = 'Продолжить игру?\\n' \\ '----------------------------------\\n'\\ '1 - да\\n' \\", "всех окрестных \\n' \\ 'островов, можешь выкинуть ненужную теперь карту)", "\\ 'только если будет иметь нужное количество очков \\n' \\", "\\n\\n' \\ 'Цель игры - добыть все сокровища и скопить", "в роли капитана корабля, собери ' \\ 'команду и достань", "\"\"\"Text parts.\"\"\" SEPARATOR = '----------------------------------' CONT_GAME = 'enter для продолжения", "\\n'\\ '5 - узнать все требуемые характеристики (3 монеты)' ORACLE_QUESTION_2", "(1 монета) \\n'\\ '5 - узнать все требуемые характеристики (3", "хранящееся \\n' \\ 'на нем сокровище и тем труднее его", "номеру острова,\\n' \\ 'но точная комбинация тебе неизвестна. !!!' ORACLE_QUESTION", "равна номеру острова,\\n' \\ 'но точная комбинация тебе неизвестна. !!!'", "\\n' \\ 'только если будет иметь нужное количество очков \\n'", "Ты собрал сокровища со всех окрестных \\n' \\ 'островов, можешь", "\\n' \\ 'капитана подошла к концу. А дядюшка в тебя", "у команды? 
(1 монета)' GO_TAVERN_TEXT = 'Отлично! Для похода на", "монета) \\n'\\ '5 - узнать все требуемые характеристики (3 монеты)'", "он влияет на стоимость ' \\ 'сокровищ на островах. \\n'", "- добыть все сокровища и скопить как можно больше монет.", "Глаз', 'Гарри', 'Моррис', 'Джек', 'Алекс', 'Сэм', 'Том', 'Янис', 'Геральт', 'Ринсвинд',", "нет, сам разберусь' ORACLE_QUESTION_1 = 'Что ты хочешь узнать у", "тем ценнее хранящееся \\n' \\ 'на нем сокровище и тем", "сокровище. \\n' \\ 'Если у тебя еще остались монеты, то", "\\n' \\ 'необходимо обезвредить ловушку. Чем больше \\n' \\ 'порядковый", "достать сокровище. \\n' \\ 'Если у тебя еще остались монеты,", "требуемые характеристики (3 монеты)' ORACLE_QUESTION_2 = 'Что ты хочешь узнать", "острова,\\n' \\ 'но точная комбинация тебе неизвестна. !!!' ORACLE_QUESTION =", "\\ '2 - средне \\n' \\ '3 - тяжело' INTRODUCTION", "зовут?' CHOOSE_LEVEL = 'Выбери уровень сложности, он влияет на стоимость", "потратил все деньги. Карьера пиратского \\n' \\ 'капитана подошла к", "'Конец игры.' NAMES = ['Боб', 'Ричард', 'Алан', 'Степан', 'Грозный Глаз',", "['Боб', 'Ричард', 'Алан', 'Степан', 'Грозный Глаз', 'Гарри', 'Моррис', 'Джек', 'Алекс',", "игры' GREETING = 'Добро пожаловать в игру ''Сундук сокровищ''!\\n' \\", "уровень сложности, он влияет на стоимость ' \\ 'сокровищ на", "наследство от дядюшки тебе достался корабль, \\n' \\ 'несколько золотых", "на которой \\n' \\ 'отмечены 10 островов. На каждом из", "буду сам себе оракул! \\n'\\ '2 - сколько очков логики", "(1 монета) \\n'\\ '4 - сколько очков ловкости должно быть", "\\n' \\ '2 - средне \\n' \\ '3 - тяжело'", "CONT_GAME = 'enter для продолжения игры' GREETING = 'Добро пожаловать", "'К сожалению, ты не смог достать сокровище. \\n' \\ 'Если", "сокровища!' NAME_QUESTION = 'Как тебя зовут?' CHOOSE_LEVEL = 'Выбери уровень", "того, чтобы достать его, \\n' \\ 'необходимо обезвредить ловушку. 
Чем", "достался корабль, \\n' \\ 'несколько золотых монет и карта, на", "его, \\n' \\ 'необходимо обезвредить ловушку. Чем больше \\n' \\", "должно быть у команды? (1 монета) \\n'\\ '5 - узнать", "'5 - узнать все требуемые характеристики (3 монеты)' ORACLE_QUESTION_2 =", "NAMES = ['Боб', 'Ричард', 'Алан', 'Степан', 'Грозный Глаз', 'Гарри', 'Моррис',", "'----------------------------------\\n'\\ '1 - да, пойду\\n' \\ '2 - нет, сам", "\\n' \\ 'попробовать организовать поход заново. Удачи!' WINNING = 'Поздравляю!", "концу. А дядюшка в тебя верил! \\n' \\ 'Конец игры.'", "ценнее хранящееся \\n' \\ 'на нем сокровище и тем труднее", "Пойдешь ли ты к нему?\\n' \\ '----------------------------------\\n'\\ '1 - да,", "силы и ловкости. \\n\\n' \\ '!!! Сумма всех требуемых очков", "\\n' \\ '----------------------------------\\n'\\ '1 - я передумал, буду сам себе", "острова, тем ценнее хранящееся \\n' \\ 'на нем сокровище и", "оракул. За определенную\\n' \\ 'плату он сможет предсказать с какой", "тебя верил! \\n' \\ 'Конец игры.' NAMES = ['Боб', 'Ричард',", "= 'В наследство от дядюшки тебе достался корабль, \\n' \\", "За определенную\\n' \\ 'плату он сможет предсказать с какой ловушкой\\n'", "от дядюшки тебе достался корабль, \\n' \\ 'несколько золотых монет", "'3 - тяжело' INTRODUCTION = 'В наследство от дядюшки тебе", "из островов \\n' \\ 'зарыт клад. Но для того, чтобы", "\\n' \\ 'команда, а нанять ее ты сможешь в таверне.'", "таверне.' EXIT_QUESTION = 'Продолжить игру?\\n' \\ '----------------------------------\\n'\\ '1 - да\\n'", "корабль, \\n' \\ 'несколько золотых монет и карта, на которой", "\\n' \\ 'отмечены 10 островов. На каждом из островов \\n'", "островах. \\n' \\ '1 - легко \\n' \\ '2 -", "- средне \\n' \\ '3 - тяжело' INTRODUCTION = 'В", "предсказать с какой ловушкой\\n' \\ 'ты столкнешься на острове. Пойдешь", "достать его, \\n' \\ 'необходимо обезвредить ловушку. Чем больше \\n'", "все деньги. 
Карьера пиратского \\n' \\ 'капитана подошла к концу.", "логики должно быть у команды? (1 монета) \\n'\\ '3 -", "'!!! Сумма всех требуемых очков равна номеру острова,\\n' \\ 'но", "и карта, на которой \\n' \\ 'отмечены 10 островов. На", "сожалению, ты не смог достать сокровище. \\n' \\ 'Если у", "теперь карту) \\n' \\ 'Конец игры.' LOSING = 'Сожалею, ты", "оракул! \\n'\\ '2 - сколько очков логики должно быть у", "очков \\n' \\ 'логики, силы и ловкости. \\n\\n' \\ '!!!", "\\ 'Команда твоего корабля сможет обезвредить ловушку, \\n' \\ 'только", "команды? (1 монета)' GO_TAVERN_TEXT = 'Отлично! Для похода на остров", "пиратского \\n' \\ 'капитана подошла к концу. А дядюшка в", "оракула? \\n' \\ '----------------------------------\\n'\\ '1 - я передумал, буду сам", "столкнешься на острове. Пойдешь ли ты к нему?\\n' \\ '----------------------------------\\n'\\", "\\n' \\ 'Самое время готовиться к следующему походу.' FAILURE_STEP =", "карту) \\n' \\ 'Конец игры.' LOSING = 'Сожалею, ты потратил", "\\ '!!! Сумма всех требуемых очков равна номеру острова,\\n' \\", "сможет предсказать с какой ловушкой\\n' \\ 'ты столкнешься на острове.", "ли ты к нему?\\n' \\ '----------------------------------\\n'\\ '1 - да, пойду\\n'", "будет иметь нужное количество очков \\n' \\ 'логики, силы и", "'----------------------------------\\n'\\ '1 - я передумал, буду сам себе оракул! \\n'\\", "\\ '2 - нет' SUCCESS_STEP = 'Поздравляю! Ты смог достать", "я передумал, буду сам себе оракул! \\n'\\ '2 - сколько", "\\n' \\ 'порядковый номер острова, тем ценнее хранящееся \\n' \\", "собрал сокровища со всех окрестных \\n' \\ 'островов, можешь выкинуть", "'Алекс', 'Сэм', 'Том', 'Янис', 'Геральт', 'Ринсвинд', 'Купер', 'Борис', 'Джон', 'Рон']", "корабля, собери ' \\ 'команду и достань все сокровища!' NAME_QUESTION", "комбинация тебе неизвестна. !!!' ORACLE_QUESTION = 'Здесь неподалеку живет известный", "'сокровищ на островах. \\n' \\ '1 - легко \\n' \\", "у команды? 
(1 монета) \\n'\\ '4 - сколько очков ловкости", "всех требуемых очков равна номеру острова,\\n' \\ 'но точная комбинация", "'попробовать организовать поход заново. Удачи!' WINNING = 'Поздравляю! Ты собрал", "сложности, он влияет на стоимость ' \\ 'сокровищ на островах.", "быть у команды? (1 монета) \\n'\\ '5 - узнать все", "роли капитана корабля, собери ' \\ 'команду и достань все", "- да, пойду\\n' \\ '2 - нет, сам разберусь' ORACLE_QUESTION_1", "'2 - средне \\n' \\ '3 - тяжело' INTRODUCTION =", "монеты)' ORACLE_QUESTION_2 = 'Что ты хочешь узнать у оракула? \\n'", "можешь \\n' \\ 'попробовать организовать поход заново. Удачи!' WINNING =", "готовиться к следующему походу.' FAILURE_STEP = 'К сожалению, ты не", "'3 - сколько очков силы должно быть у команды? (1", "и достань все сокровища!' NAME_QUESTION = 'Как тебя зовут?' CHOOSE_LEVEL", "характеристики (3 монеты)' ORACLE_QUESTION_2 = 'Что ты хочешь узнать у", "неизвестна. !!!' ORACLE_QUESTION = 'Здесь неподалеку живет известный оракул. За", "на стоимость ' \\ 'сокровищ на островах. \\n' \\ '1", "достань все сокровища!' NAME_QUESTION = 'Как тебя зовут?' CHOOSE_LEVEL =", "'несколько золотых монет и карта, на которой \\n' \\ 'отмечены", "деньги. Карьера пиратского \\n' \\ 'капитана подошла к концу. А", "очков логики должно быть у команды? (1 монета) \\n'\\ '3", "тяжело' INTRODUCTION = 'В наследство от дядюшки тебе достался корабль,", "какой ловушкой\\n' \\ 'ты столкнешься на острове. Пойдешь ли ты", "сам себе оракул! \\n'\\ '2 - сколько очков логики должно", "- сколько очков ловкости должно быть у команды? (1 монета)'", "'команду и достань все сокровища!' NAME_QUESTION = 'Как тебя зовут?'", "\\ 'зарыт клад. Но для того, чтобы достать его, \\n'", "'плату он сможет предсказать с какой ловушкой\\n' \\ 'ты столкнешься", "'Сожалею, ты потратил все деньги. Карьера пиратского \\n' \\ 'капитана", "все сокровища и скопить как можно больше монет. \\n\\n' \\", "\\n' \\ 'Конец игры.' 
NAMES = ['Боб', 'Ричард', 'Алан', 'Степан',", "нем сокровище и тем труднее его получить. \\n\\n' \\ 'Цель", "золотых монет и карта, на которой \\n' \\ 'отмечены 10", "быть у команды? (1 монета) \\n'\\ '4 - сколько очков", "\\ 'Самое время готовиться к следующему походу.' FAILURE_STEP = 'К", "то можешь \\n' \\ 'попробовать организовать поход заново. Удачи!' WINNING", "\\ 'Если у тебя еще остались монеты, то можешь \\n'", "сможешь в таверне.' EXIT_QUESTION = 'Продолжить игру?\\n' \\ '----------------------------------\\n'\\ '1", "(1 монета) \\n'\\ '3 - сколько очков силы должно быть", "'Попробуй себя в роли капитана корабля, собери ' \\ 'команду", "' \\ 'команду и достань все сокровища!' NAME_QUESTION = 'Как", "не смог достать сокровище. \\n' \\ 'Если у тебя еще", "= 'Поздравляю! Ты собрал сокровища со всех окрестных \\n' \\", "ненужную теперь карту) \\n' \\ 'Конец игры.' LOSING = 'Сожалею,", "\\ '1 - легко \\n' \\ '2 - средне \\n'", "к нему?\\n' \\ '----------------------------------\\n'\\ '1 - да, пойду\\n' \\ '2", "Но для того, чтобы достать его, \\n' \\ 'необходимо обезвредить", "ловкости должно быть у команды? (1 монета)' GO_TAVERN_TEXT = 'Отлично!", "Карьера пиратского \\n' \\ 'капитана подошла к концу. А дядюшка", "команды? (1 монета) \\n'\\ '5 - узнать все требуемые характеристики", "корабля сможет обезвредить ловушку, \\n' \\ 'только если будет иметь", "'Грозный Глаз', 'Гарри', 'Моррис', 'Джек', 'Алекс', 'Сэм', 'Том', 'Янис', 'Геральт',", "\\ 'команда, а нанять ее ты сможешь в таверне.' EXIT_QUESTION", "\\ 'Попробуй себя в роли капитана корабля, собери ' \\", "для того, чтобы достать его, \\n' \\ 'необходимо обезвредить ловушку.", "очков равна номеру острова,\\n' \\ 'но точная комбинация тебе неизвестна.", "сокровища и скопить как можно больше монет. \\n\\n' \\ 'Команда", "= 'Сожалею, ты потратил все деньги. Карьера пиратского \\n' \\", "пожаловать в игру ''Сундук сокровищ''!\\n' \\ 'Попробуй себя в роли", "\\ 'капитана подошла к концу. 
А дядюшка в тебя верил!", "'необходимо обезвредить ловушку. Чем больше \\n' \\ 'порядковый номер острова,", "номер острова, тем ценнее хранящееся \\n' \\ 'на нем сокровище", "\\n' \\ 'логики, силы и ловкости. \\n\\n' \\ '!!! Сумма", "сколько очков ловкости должно быть у команды? (1 монета)' GO_TAVERN_TEXT", "ловушку, \\n' \\ 'только если будет иметь нужное количество очков", "SEPARATOR = '----------------------------------' CONT_GAME = 'enter для продолжения игры' GREETING", "(3 монеты)' ORACLE_QUESTION_2 = 'Что ты хочешь узнать у оракула?", "\\n\\n' \\ 'Команда твоего корабля сможет обезвредить ловушку, \\n' \\", "дядюшка в тебя верил! \\n' \\ 'Конец игры.' NAMES =", "точная комбинация тебе неизвестна. !!!' ORACLE_QUESTION = 'Здесь неподалеку живет", "очков ловкости должно быть у команды? (1 монета) \\n'\\ '5", "'Команда твоего корабля сможет обезвредить ловушку, \\n' \\ 'только если", "монеты, то можешь \\n' \\ 'попробовать организовать поход заново. Удачи!'", "\\ 'команду и достань все сокровища!' NAME_QUESTION = 'Как тебя", "= 'Как тебя зовут?' CHOOSE_LEVEL = 'Выбери уровень сложности, он", "'2 - нет, сам разберусь' ORACLE_QUESTION_1 = 'Что ты хочешь", "похода на остров тебе понадобится \\n' \\ 'команда, а нанять", "нему?\\n' \\ '----------------------------------\\n'\\ '1 - да, пойду\\n' \\ '2 -", "!!!' ORACLE_QUESTION = 'Здесь неподалеку живет известный оракул. За определенную\\n'", "заново. Удачи!' WINNING = 'Поздравляю! Ты собрал сокровища со всех", "'Ричард', 'Алан', 'Степан', 'Грозный Глаз', 'Гарри', 'Моррис', 'Джек', 'Алекс', 'Сэм',", "должно быть у команды? (1 монета) \\n'\\ '3 - сколько", "= 'Поздравляю! Ты смог достать спрятанное сокровище! \\n' \\ 'Самое", "сокровищ''!\\n' \\ 'Попробуй себя в роли капитана корабля, собери '", "да, пойду\\n' \\ '2 - нет, сам разберусь' ORACLE_QUESTION_1 =", "\\ 'логики, силы и ловкости. \\n\\n' \\ '!!! Сумма всех", "каждом из островов \\n' \\ 'зарыт клад. Но для того,", "следующему походу.' 
FAILURE_STEP = 'К сожалению, ты не смог достать", "= 'Здесь неподалеку живет известный оракул. За определенную\\n' \\ 'плату", "команды? (1 монета) \\n'\\ '3 - сколько очков силы должно", "'4 - сколько очков ловкости должно быть у команды? (1", "сокровища со всех окрестных \\n' \\ 'островов, можешь выкинуть ненужную", "разберусь' ORACLE_QUESTION_1 = 'Что ты хочешь узнать у оракула? \\n'", "узнать у оракула? \\n' \\ '----------------------------------\\n'\\ '1 - я передумал,", "GO_TAVERN_TEXT = 'Отлично! Для похода на остров тебе понадобится \\n'", "'Продолжить игру?\\n' \\ '----------------------------------\\n'\\ '1 - да\\n' \\ '2 -", "команды? (1 монета) \\n'\\ '4 - сколько очков ловкости должно", "игру ''Сундук сокровищ''!\\n' \\ 'Попробуй себя в роли капитана корабля,", "\\ 'плату он сможет предсказать с какой ловушкой\\n' \\ 'ты", "'команда, а нанять ее ты сможешь в таверне.' EXIT_QUESTION =", "еще остались монеты, то можешь \\n' \\ 'попробовать организовать поход", "'1 - да, пойду\\n' \\ '2 - нет, сам разберусь'", "На каждом из островов \\n' \\ 'зарыт клад. Но для", "'Самое время готовиться к следующему походу.' FAILURE_STEP = 'К сожалению,", "- я передумал, буду сам себе оракул! \\n'\\ '2 -", "'----------------------------------' CONT_GAME = 'enter для продолжения игры' GREETING = 'Добро", "'Гарри', 'Моррис', 'Джек', 'Алекс', 'Сэм', 'Том', 'Янис', 'Геральт', 'Ринсвинд', 'Купер',", "- нет' SUCCESS_STEP = 'Поздравляю! Ты смог достать спрятанное сокровище!", "обезвредить ловушку. Чем больше \\n' \\ 'порядковый номер острова, тем", "очков силы должно быть у команды? (1 монета) \\n'\\ '4", "у команды? (1 монета) \\n'\\ '5 - узнать все требуемые", "можешь выкинуть ненужную теперь карту) \\n' \\ 'Конец игры.' LOSING", "монет и карта, на которой \\n' \\ 'отмечены 10 островов.", "островов. На каждом из островов \\n' \\ 'зарыт клад. Но", "верил! \\n' \\ 'Конец игры.' NAMES = ['Боб', 'Ричард', 'Алан',", "стоимость ' \\ 'сокровищ на островах. 
\\n' \\ '1 -", "неподалеку живет известный оракул. За определенную\\n' \\ 'плату он сможет", "сколько очков ловкости должно быть у команды? (1 монета) \\n'\\", "'Как тебя зовут?' CHOOSE_LEVEL = 'Выбери уровень сложности, он влияет", "собери ' \\ 'команду и достань все сокровища!' NAME_QUESTION =", "ты к нему?\\n' \\ '----------------------------------\\n'\\ '1 - да, пойду\\n' \\", "- тяжело' INTRODUCTION = 'В наследство от дядюшки тебе достался" ]
[ "verbose_name='ID')), ('value', models.TextField()), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user',", "], ), migrations.CreateModel( name='Like', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddConstraint( model_name='subscription', constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique subscribes'),", "default='')), ('likes', models.IntegerField(default=0)), ('comments', models.IntegerField(default=0)), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(blank=True, default='default.png', null=True, upload_to='')),", "name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(blank=True, default='default.png',", "00:42 from django.conf import settings from django.db import migrations, models", "on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True,", "serialize=False, verbose_name='ID')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)), ('userSubscribed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Comment', fields=[ ('id', 
models.AutoField(auto_created=True, primary_key=True,", "related_name='user', to=settings.AUTH_USER_MODEL)), ('userSubscribed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel(", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(null=True, upload_to='')), ('description', models.TextField(blank=True, default='')),", "), migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture',", "Django 3.1.3 on 2021-01-07 00:42 from django.conf import settings from", "('description', models.TextField(blank=True, default='')), ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel(", "migrations.AddConstraint( model_name='subscription', constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique subscribes'), ), migrations.AddConstraint( model_name='like', constraint=models.UniqueConstraint(fields=('image',", "migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(blank=True,", "primary_key=True, serialize=False, verbose_name='ID')), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "= True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "('userSubscribed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Profile', fields=[", "('user', 
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddConstraint( model_name='subscription', constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),", "models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True", "('date', models.DateTimeField(default=django.utils.timezone.now)), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddConstraint( model_name='subscription',", "to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Like', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "], ), migrations.CreateModel( name='Comment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(null=True, upload_to='')), ('description',", "django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies =", "on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Comment',", "name='Image', 
fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(null=True, upload_to='')),", "related_name='userSubscribed', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "name='Like', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Comment', fields=[ ('id', models.AutoField(auto_created=True,", "constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique subscribes'), ), migrations.AddConstraint( model_name='like', constraint=models.UniqueConstraint(fields=('image', 'user'), name='unique", "migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial =", "('likes', models.IntegerField(default=0)), ('comments', models.IntegerField(default=0)), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "= [ migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "to=settings.AUTH_USER_MODEL)), ('userSubscribed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Profile',", "to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Comment', fields=[", "<gh_stars>0 # Generated by 
Django 3.1.3 on 2021-01-07 00:42 from", "models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Profile', fields=[ ('id',", "models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddConstraint(", "3.1.3 on 2021-01-07 00:42 from django.conf import settings from django.db", "('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Comment', fields=[ ('id',", "], ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "('picture', models.ImageField(blank=True, default='default.png', null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('user', models.OneToOneField(null=True,", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True,", "models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Like', fields=[ ('id', models.AutoField(auto_created=True,", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Image',", "], ), migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Like', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "subscribes'), ), migrations.AddConstraint( 
model_name='like', constraint=models.UniqueConstraint(fields=('image', 'user'), name='unique likes'), ), ]", "default='')), ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Like', fields=[", "verbose_name='ID')), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Image', fields=[", "], ), migrations.AddConstraint( model_name='subscription', constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique subscribes'), ), migrations.AddConstraint(", "models.DateTimeField(default=django.utils.timezone.now)), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],", "), migrations.CreateModel( name='Subscription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('user',", "), migrations.CreateModel( name='Like', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image',", "migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(null=True,", "settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone", "initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), 
migrations.AddConstraint( model_name='subscription', constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique", "django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration):", "upload_to='')), ('description', models.TextField(blank=True, default='')), ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ),", "serialize=False, verbose_name='ID')), ('picture', models.ImageField(blank=True, default='default.png', null=True, upload_to='')), ('description', models.TextField(blank=True, default='')),", "django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True,", "to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddConstraint( model_name='subscription', constraint=models.UniqueConstraint(fields=('user',", "import settings from django.db import migrations, models import django.db.models.deletion import", "('value', models.TextField()), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user',", "serialize=False, verbose_name='ID')), ('value', models.TextField()), ('date', models.DateTimeField(default=django.utils.timezone.now)), 
('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')),", "('comments', models.IntegerField(default=0)), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ),", "verbose_name='ID')), ('picture', models.ImageField(null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('likes', models.IntegerField(default=0)), ('comments',", "2021-01-07 00:42 from django.conf import settings from django.db import migrations,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.TextField()), ('date', models.DateTimeField(default=django.utils.timezone.now)),", "('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Like', fields=[ ('id',", "name='unique subscribes'), ), migrations.AddConstraint( model_name='like', constraint=models.UniqueConstraint(fields=('image', 'user'), name='unique likes'), ),", "] operations = [ migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "primary_key=True, serialize=False, verbose_name='ID')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)), ('userSubscribed', models.ForeignKey(null=True,", "'userSubscribed'), name='unique subscribes'), ), migrations.AddConstraint( model_name='like', constraint=models.UniqueConstraint(fields=('image', 'user'), name='unique likes'),", "import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial", 
"migrations.CreateModel( name='Subscription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('user', models.ForeignKey(null=True,", "primary_key=True, serialize=False, verbose_name='ID')), ('value', models.TextField()), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "[ migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture',", "models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel(", "True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "name='Subscription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "model_name='subscription', constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique subscribes'), ), migrations.AddConstraint( model_name='like', constraint=models.UniqueConstraint(fields=('image', 'user'),", "on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)), ('userSubscribed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)), ], ),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.TextField()), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('image',", "models.TextField()), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('image', 
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('likes',", "Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "verbose_name='ID')), ('picture', models.ImageField(blank=True, default='default.png', null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('user',", "null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],", "import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [", "('date', models.DateTimeField(default=django.utils.timezone.now)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Subscription',", "models.IntegerField(default=0)), ('comments', models.IntegerField(default=0)), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],", "on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', 
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')),", "primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(blank=True, default='default.png', null=True, upload_to='')), ('description', models.TextField(blank=True,", "models.DateTimeField(default=django.utils.timezone.now)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Subscription', fields=[", "serialize=False, verbose_name='ID')), ('picture', models.ImageField(null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('likes', models.IntegerField(default=0)),", "migrations.CreateModel( name='Comment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.TextField()),", "models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.AutoField(auto_created=True,", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Image', fields=[ ('id',", "serialize=False, verbose_name='ID')), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "), migrations.AddConstraint( model_name='subscription', constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique subscribes'), ), migrations.AddConstraint( model_name='like',", "models.IntegerField(default=0)), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel(", "on 2021-01-07 00:42 from django.conf 
import settings from django.db import", "('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)), ('userSubscribed',", "upload_to='')), ('description', models.TextField(blank=True, default='')), ('likes', models.IntegerField(default=0)), ('comments', models.IntegerField(default=0)), ('date', models.DateTimeField(default=django.utils.timezone.now)),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')), ('user',", "from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class", "Generated by Django 3.1.3 on 2021-01-07 00:42 from django.conf import", "models.TextField(blank=True, default='')), ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Like',", "models.ImageField(blank=True, default='default.png', null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE,", "# Generated by Django 3.1.3 on 2021-01-07 00:42 from django.conf", "models.ImageField(null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('likes', models.IntegerField(default=0)), ('comments', models.IntegerField(default=0)), ('date',", "models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)), ('userSubscribed', 
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)), ],", "('picture', models.ImageField(null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('likes', models.IntegerField(default=0)), ('comments', models.IntegerField(default=0)),", "verbose_name='ID')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)), ('userSubscribed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed',", "to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(blank=True, default='default.png', null=True, upload_to='')), ('description',", "import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies", "by Django 3.1.3 on 2021-01-07 00:42 from django.conf import settings", "from django.conf import settings from django.db import migrations, models import", "default='default.png', null=True, upload_to='')), ('description', models.TextField(blank=True, default='')), ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "('description', models.TextField(blank=True, default='')), ('likes', models.IntegerField(default=0)), ('comments', models.IntegerField(default=0)), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('user',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(blank=True, default='default.png', null=True,", "migrations.CreateModel( name='Like', fields=[ ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', models.ForeignKey(null=True,", "('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Subscription', fields=[ ('id',", "), migrations.CreateModel( name='Comment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value',", "models.TextField(blank=True, default='')), ('likes', models.IntegerField(default=0)), ('comments', models.IntegerField(default=0)), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('user', models.ForeignKey(null=True,", "to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Comment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.TextField()), ('date', models.DateTimeField(default=django.utils.timezone.now)), ('image', models.ForeignKey(null=True,", "operations = [ migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "django.conf import settings from django.db import migrations, models import django.db.models.deletion", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.ImageField(null=True, upload_to='')), ('description', models.TextField(blank=True,", "('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)), ('userSubscribed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)),", "name='Comment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', 
models.TextField()), ('date',", "to=settings.AUTH_USER_MODEL)), ], ), migrations.AddConstraint( model_name='subscription', constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique subscribes'), )," ]
[ "presents on a given directory \"\"\" for module in os.listdir(directory):", "= directory + \"/\" + module if os.path.isdir(sub_dir): ControllerLoader.load(sub_dir, \"{}.{}\".format(package,", "load automatically all the python module presents on a given", "module[-3:] != \".py\": continue else: module_import_name = \"{}.{}\".format(package, module[:-3]) importlib.import_module(module_import_name)", "class. \"\"\" @staticmethod def load(directory, package): \"\"\" It is an", "directory + \"/\" + module if os.path.isdir(sub_dir): ControllerLoader.load(sub_dir, \"{}.{}\".format(package, module))", "def load(directory, package): \"\"\" It is an utility to load", "to load automatically all the python module presents on a", "@staticmethod def load(directory, package): \"\"\" It is an utility to", "+ \"/\" + module if os.path.isdir(sub_dir): ControllerLoader.load(sub_dir, \"{}.{}\".format(package, module)) if", "It is an utility to load automatically all the python", "\"\"\" @staticmethod def load(directory, package): \"\"\" It is an utility", "== \"__init__.py\" or module[-3:] != \".py\": continue else: module_import_name =", "ControllerLoader class. \"\"\" @staticmethod def load(directory, package): \"\"\" It is", "package): \"\"\" It is an utility to load automatically all", "an utility to load automatically all the python module presents", "if module == \"__init__.py\" or module[-3:] != \".py\": continue else:", "os.path.isdir(sub_dir): ControllerLoader.load(sub_dir, \"{}.{}\".format(package, module)) if module == \"__init__.py\" or module[-3:]", "sub_dir = directory + \"/\" + module if os.path.isdir(sub_dir): ControllerLoader.load(sub_dir,", "module)) if module == \"__init__.py\" or module[-3:] != \".py\": continue", "directory \"\"\" for module in os.listdir(directory): sub_dir = directory +", "\"\"\" The ControllerLoader class. 
\"\"\" @staticmethod def load(directory, package): \"\"\"", "\"/\" + module if os.path.isdir(sub_dir): ControllerLoader.load(sub_dir, \"{}.{}\".format(package, module)) if module", "python module presents on a given directory \"\"\" for module", "module == \"__init__.py\" or module[-3:] != \".py\": continue else: module_import_name", "ControllerLoader: \"\"\" The ControllerLoader class. \"\"\" @staticmethod def load(directory, package):", "all the python module presents on a given directory \"\"\"", "\"\"\" for module in os.listdir(directory): sub_dir = directory + \"/\"", "the python module presents on a given directory \"\"\" for", "module if os.path.isdir(sub_dir): ControllerLoader.load(sub_dir, \"{}.{}\".format(package, module)) if module == \"__init__.py\"", "module presents on a given directory \"\"\" for module in", "a given directory \"\"\" for module in os.listdir(directory): sub_dir =", "automatically all the python module presents on a given directory", "or module[-3:] != \".py\": continue else: module_import_name = \"{}.{}\".format(package, module[:-3])", "on a given directory \"\"\" for module in os.listdir(directory): sub_dir", "if os.path.isdir(sub_dir): ControllerLoader.load(sub_dir, \"{}.{}\".format(package, module)) if module == \"__init__.py\" or", "\"{}.{}\".format(package, module)) if module == \"__init__.py\" or module[-3:] != \".py\":", "+ module if os.path.isdir(sub_dir): ControllerLoader.load(sub_dir, \"{}.{}\".format(package, module)) if module ==", "is an utility to load automatically all the python module", "<reponame>KiraPC/fastapi-router-controller import os import importlib class ControllerLoader: \"\"\" The ControllerLoader", "module in os.listdir(directory): sub_dir = directory + \"/\" + module", "given directory \"\"\" for module in os.listdir(directory): sub_dir = directory", "utility to load automatically all the python module presents on", "import os import importlib class ControllerLoader: \"\"\" The ControllerLoader class.", 
"importlib class ControllerLoader: \"\"\" The ControllerLoader class. \"\"\" @staticmethod def", "The ControllerLoader class. \"\"\" @staticmethod def load(directory, package): \"\"\" It", "for module in os.listdir(directory): sub_dir = directory + \"/\" +", "import importlib class ControllerLoader: \"\"\" The ControllerLoader class. \"\"\" @staticmethod", "os.listdir(directory): sub_dir = directory + \"/\" + module if os.path.isdir(sub_dir):", "ControllerLoader.load(sub_dir, \"{}.{}\".format(package, module)) if module == \"__init__.py\" or module[-3:] !=", "load(directory, package): \"\"\" It is an utility to load automatically", "\"__init__.py\" or module[-3:] != \".py\": continue else: module_import_name = \"{}.{}\".format(package,", "\"\"\" It is an utility to load automatically all the", "class ControllerLoader: \"\"\" The ControllerLoader class. \"\"\" @staticmethod def load(directory,", "os import importlib class ControllerLoader: \"\"\" The ControllerLoader class. \"\"\"", "in os.listdir(directory): sub_dir = directory + \"/\" + module if" ]
[ "werkzeug.security import check_password_hash, generate_password_hash from app import db from app.mod_auth.forms", "import Blueprint, Flask, send_from_directory from werkzeug.security import check_password_hash, generate_password_hash from", "app.mod_auth.models import User mod_ecomm = Blueprint('products', __name__, url_prefix='/products', static_folder='../../frontend/build') @mod_ecomm.route(\"/\",", "@mod_ecomm.route(\"/\", defaults={'path': ''}) def serve(path): if path: return send_from_directory(mod_ecomm.static_folder, path)", "def serve(path): if path: return send_from_directory(mod_ecomm.static_folder, path) else: return send_from_directory(mod_ecomm.static_folder,", "flask import Blueprint, Flask, send_from_directory from werkzeug.security import check_password_hash, generate_password_hash", "from app.mod_auth.models import User mod_ecomm = Blueprint('products', __name__, url_prefix='/products', static_folder='../../frontend/build')", "static_folder='../../frontend/build') @mod_ecomm.route(\"/\", defaults={'path': ''}) def serve(path): if path: return send_from_directory(mod_ecomm.static_folder,", "generate_password_hash from app import db from app.mod_auth.forms import LoginForm from", "db from app.mod_auth.forms import LoginForm from app.mod_auth.models import User mod_ecomm", "''}) def serve(path): if path: return send_from_directory(mod_ecomm.static_folder, path) else: return", "Flask, send_from_directory from werkzeug.security import check_password_hash, generate_password_hash from app import", "url_prefix='/products', static_folder='../../frontend/build') @mod_ecomm.route(\"/\", defaults={'path': ''}) def serve(path): if path: return", "from flask import Blueprint, Flask, send_from_directory from werkzeug.security import check_password_hash,", "from werkzeug.security import check_password_hash, generate_password_hash from app import db from", "Blueprint, Flask, send_from_directory from werkzeug.security import check_password_hash, 
generate_password_hash from app", "import LoginForm from app.mod_auth.models import User mod_ecomm = Blueprint('products', __name__,", "import check_password_hash, generate_password_hash from app import db from app.mod_auth.forms import", "Blueprint('products', __name__, url_prefix='/products', static_folder='../../frontend/build') @mod_ecomm.route(\"/\", defaults={'path': ''}) def serve(path): if", "__name__, url_prefix='/products', static_folder='../../frontend/build') @mod_ecomm.route(\"/\", defaults={'path': ''}) def serve(path): if path:", "User mod_ecomm = Blueprint('products', __name__, url_prefix='/products', static_folder='../../frontend/build') @mod_ecomm.route(\"/\", defaults={'path': ''})", "check_password_hash, generate_password_hash from app import db from app.mod_auth.forms import LoginForm", "app.mod_auth.forms import LoginForm from app.mod_auth.models import User mod_ecomm = Blueprint('products',", "from app.mod_auth.forms import LoginForm from app.mod_auth.models import User mod_ecomm =", "send_from_directory from werkzeug.security import check_password_hash, generate_password_hash from app import db", "from app import db from app.mod_auth.forms import LoginForm from app.mod_auth.models", "app import db from app.mod_auth.forms import LoginForm from app.mod_auth.models import", "import db from app.mod_auth.forms import LoginForm from app.mod_auth.models import User", "mod_ecomm = Blueprint('products', __name__, url_prefix='/products', static_folder='../../frontend/build') @mod_ecomm.route(\"/\", defaults={'path': ''}) def", "= Blueprint('products', __name__, url_prefix='/products', static_folder='../../frontend/build') @mod_ecomm.route(\"/\", defaults={'path': ''}) def serve(path):", "LoginForm from app.mod_auth.models import User mod_ecomm = Blueprint('products', __name__, url_prefix='/products',", "serve(path): if path: return send_from_directory(mod_ecomm.static_folder, path) else: return send_from_directory(mod_ecomm.static_folder, 
'index.html')", "defaults={'path': ''}) def serve(path): if path: return send_from_directory(mod_ecomm.static_folder, path) else:", "import User mod_ecomm = Blueprint('products', __name__, url_prefix='/products', static_folder='../../frontend/build') @mod_ecomm.route(\"/\", defaults={'path':" ]
[]
[ "settings.SLACK_VERIFICATION_TOKEN: log.error(\"Slack message verification failed!\") return Response(status=status.HTTP_403_FORBIDDEN) # verification challenge,", "challenge, convert to signature verification instead: if slack_message.get('type') == 'url_verification':", "in slack_message: event = slack_message.get('event') if settings.DEBUG: log.debug(f'event received:\\n{pprint.pformat(event)}\\n') try:", "request. \"\"\" log = logging.getLogger(__name__) slack_message = request.data if slack_message.get('token')", "handler from zenslackchat.models import SlackApp from zenslackchat.models import ZendeskApp class", "ID>/event-subscriptions you need to: - Enable Events from \"Off\" to", "API for this. Handy documentation for Slack events: https://api.slack.com/events-api The", "**kwargs): \"\"\"Events will come in over a POST request. \"\"\"", "will come in over a POST request. \"\"\" log =", "\"\"\" log = logging.getLogger(__name__) slack_message = request.data if slack_message.get('token') !=", "# noqa # I want all event even if they", "import status from rest_framework.views import APIView from rest_framework.response import Response", "Response(status=status.HTTP_403_FORBIDDEN) # verification challenge, convert to signature verification instead: if", "= request.data if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN: log.error(\"Slack message verification failed!\")", "in over a POST request. \"\"\" log = logging.getLogger(__name__) slack_message", "webapp REST API for this. Handy documentation for Slack events:", "instead of using the RTM API. This is handy as", "to events to receive them. From https://api.slack.com/apps/<APP ID>/event-subscriptions you need", "URL\" e.g.: http://<instance id>.ngrok.io/slack/events/ - Then \"Subscribe to events on", "a specifc bot process just to handle events. Instead I", "of users\" - Click \"Add Workspace Event\" and add \"message.channels\".", "app needs to subscribe to events to receive them. 
From", "no more # events will be sent. log.exception(\"Slack message_handler error:", "signature verification instead: if slack_message.get('type') == 'url_verification': return Response(data=slack_message, status=status.HTTP_200_OK)", "will be marked as broken and then no more #", "SlackApp from zenslackchat.models import ZendeskApp class Events(APIView): \"\"\"Handle Events using", "\"On\" - Enter the \"Request URL\" e.g.: http://<instance id>.ngrok.io/slack/events/ -", "from zenslackchat.models import SlackApp from zenslackchat.models import ZendeskApp class Events(APIView):", "if slack_message.get('type') == 'url_verification': return Response(data=slack_message, status=status.HTTP_200_OK) if 'event' in", "pprint import logging from django.conf import settings from rest_framework import", "events. Instead I can just using the webapp REST API", "need to run a specifc bot process just to handle", "be marked as broken and then no more # events", "and add \"message.channels\". Message on channels will now start being", "marked as broken and then no more # events will", "webapp instead of using the RTM API. This is handy", "is handy as i don't need to run a specifc", "behalf of users\" - Click \"Add Workspace Event\" and add", "I want all event even if they cause me problems.", "From https://api.slack.com/apps/<APP ID>/event-subscriptions you need to: - Enable Events from", "== 'url_verification': return Response(data=slack_message, status=status.HTTP_200_OK) if 'event' in slack_message: event", "they cause me problems. If I don't # accept the", "import Response from zenslackchat.message import handler from zenslackchat.models import SlackApp", "django.conf import settings from rest_framework import status from rest_framework.views import", "the webhook will be marked as broken and then no", "Handy documentation for Slack events: https://api.slack.com/events-api The app needs to", "to receive them. 
From https://api.slack.com/apps/<APP ID>/event-subscriptions you need to: -", "need to: - Enable Events from \"Off\" to \"On\" -", "user_id=settings.ZENDESK_USER_ID, group_id=settings.ZENDESK_GROUP_ID, ) except: # noqa # I want all", "import logging from django.conf import settings from rest_framework import status", "as broken and then no more # events will be", "logging.getLogger(__name__) slack_message = request.data if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN: log.error(\"Slack message", "this. Handy documentation for Slack events: https://api.slack.com/events-api The app needs", "APIView from rest_framework.response import Response from zenslackchat.message import handler from", "from zenslackchat.models import ZendeskApp class Events(APIView): \"\"\"Handle Events using the", "- Click \"Add Workspace Event\" and add \"message.channels\". Message on", "slack_message: event = slack_message.get('event') if settings.DEBUG: log.debug(f'event received:\\n{pprint.pformat(event)}\\n') try: handler(", "Workspace Event\" and add \"message.channels\". Message on channels will now", "start being recieved. The bot will need to be invited", "channel first. \"\"\" def post(self, request, *args, **kwargs): \"\"\"Events will", "def post(self, request, *args, **kwargs): \"\"\"Events will come in over", "instead: if slack_message.get('type') == 'url_verification': return Response(data=slack_message, status=status.HTTP_200_OK) if 'event'", "our_channel=settings.SRE_SUPPORT_CHANNEL, slack_client=SlackApp.client(), zendesk_client=ZendeskApp.client(), workspace_uri=settings.SLACK_WORKSPACE_URI, zendesk_uri=settings.ZENDESK_TICKET_URI, user_id=settings.ZENDESK_USER_ID, group_id=settings.ZENDESK_GROUP_ID, ) except: #", "= logging.getLogger(__name__) slack_message = request.data if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN: log.error(\"Slack", "over a POST request. 
\"\"\" log = logging.getLogger(__name__) slack_message =", "process just to handle events. Instead I can just using", "If I don't # accept the webhook will be marked", "will now start being recieved. The bot will need to", "- Then \"Subscribe to events on behalf of users\" -", "using the RTM API. This is handy as i don't", "the webapp instead of using the RTM API. This is", "just using the webapp REST API for this. Handy documentation", "now start being recieved. The bot will need to be", "Events(APIView): \"\"\"Handle Events using the webapp instead of using the", "you need to: - Enable Events from \"Off\" to \"On\"", "\"Request URL\" e.g.: http://<instance id>.ngrok.io/slack/events/ - Then \"Subscribe to events", "event = slack_message.get('event') if settings.DEBUG: log.debug(f'event received:\\n{pprint.pformat(event)}\\n') try: handler( event,", "then no more # events will be sent. log.exception(\"Slack message_handler", "all event even if they cause me problems. If I", "and then no more # events will be sent. log.exception(\"Slack", "invited to a channel first. \"\"\" def post(self, request, *args,", "Then \"Subscribe to events on behalf of users\" - Click", "status from rest_framework.views import APIView from rest_framework.response import Response from", "verification challenge, convert to signature verification instead: if slack_message.get('type') ==", "noqa # I want all event even if they cause", "\"\"\" def post(self, request, *args, **kwargs): \"\"\"Events will come in", "from django.conf import settings from rest_framework import status from rest_framework.views", "a POST request. \"\"\" log = logging.getLogger(__name__) slack_message = request.data", "to: - Enable Events from \"Off\" to \"On\" - Enter", "return Response(data=slack_message, status=status.HTTP_200_OK) if 'event' in slack_message: event = slack_message.get('event')", "if they cause me problems. 
If I don't # accept", "'event' in slack_message: event = slack_message.get('event') if settings.DEBUG: log.debug(f'event received:\\n{pprint.pformat(event)}\\n')", "\"Off\" to \"On\" - Enter the \"Request URL\" e.g.: http://<instance", "id>.ngrok.io/slack/events/ - Then \"Subscribe to events on behalf of users\"", "\"Add Workspace Event\" and add \"message.channels\". Message on channels will", "# I want all event even if they cause me", "group_id=settings.ZENDESK_GROUP_ID, ) except: # noqa # I want all event", "# accept the webhook will be marked as broken and", "just to handle events. Instead I can just using the", "request, *args, **kwargs): \"\"\"Events will come in over a POST", "handy as i don't need to run a specifc bot", "to signature verification instead: if slack_message.get('type') == 'url_verification': return Response(data=slack_message,", "- Enter the \"Request URL\" e.g.: http://<instance id>.ngrok.io/slack/events/ - Then", "https://api.slack.com/apps/<APP ID>/event-subscriptions you need to: - Enable Events from \"Off\"", "recieved. The bot will need to be invited to a", "want all event even if they cause me problems. If", "more # events will be sent. log.exception(\"Slack message_handler error: \")", "REST API for this. Handy documentation for Slack events: https://api.slack.com/events-api", "class Events(APIView): \"\"\"Handle Events using the webapp instead of using", "a channel first. \"\"\" def post(self, request, *args, **kwargs): \"\"\"Events", "to events on behalf of users\" - Click \"Add Workspace", "zendesk_client=ZendeskApp.client(), workspace_uri=settings.SLACK_WORKSPACE_URI, zendesk_uri=settings.ZENDESK_TICKET_URI, user_id=settings.ZENDESK_USER_ID, group_id=settings.ZENDESK_GROUP_ID, ) except: # noqa #", "The app needs to subscribe to events to receive them.", "*args, **kwargs): \"\"\"Events will come in over a POST request.", "receive them. 
From https://api.slack.com/apps/<APP ID>/event-subscriptions you need to: - Enable", ") except: # noqa # I want all event even", "events will be sent. log.exception(\"Slack message_handler error: \") return Response(status=status.HTTP_200_OK)", "zendesk_uri=settings.ZENDESK_TICKET_URI, user_id=settings.ZENDESK_USER_ID, group_id=settings.ZENDESK_GROUP_ID, ) except: # noqa # I want", "from rest_framework.views import APIView from rest_framework.response import Response from zenslackchat.message", "'url_verification': return Response(data=slack_message, status=status.HTTP_200_OK) if 'event' in slack_message: event =", "from rest_framework import status from rest_framework.views import APIView from rest_framework.response", "to \"On\" - Enter the \"Request URL\" e.g.: http://<instance id>.ngrok.io/slack/events/", "events on behalf of users\" - Click \"Add Workspace Event\"", "from \"Off\" to \"On\" - Enter the \"Request URL\" e.g.:", "e.g.: http://<instance id>.ngrok.io/slack/events/ - Then \"Subscribe to events on behalf", "This is handy as i don't need to run a", "handle events. Instead I can just using the webapp REST", "convert to signature verification instead: if slack_message.get('type') == 'url_verification': return", "zenslackchat.models import ZendeskApp class Events(APIView): \"\"\"Handle Events using the webapp", "- Enable Events from \"Off\" to \"On\" - Enter the", "them. From https://api.slack.com/apps/<APP ID>/event-subscriptions you need to: - Enable Events", "the \"Request URL\" e.g.: http://<instance id>.ngrok.io/slack/events/ - Then \"Subscribe to", "cause me problems. If I don't # accept the webhook", "using the webapp instead of using the RTM API. 
This", "broken and then no more # events will be sent.", "log = logging.getLogger(__name__) slack_message = request.data if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN:", "I can just using the webapp REST API for this.", "if settings.DEBUG: log.debug(f'event received:\\n{pprint.pformat(event)}\\n') try: handler( event, our_channel=settings.SRE_SUPPORT_CHANNEL, slack_client=SlackApp.client(), zendesk_client=ZendeskApp.client(),", "import settings from rest_framework import status from rest_framework.views import APIView", "bot will need to be invited to a channel first.", "slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN: log.error(\"Slack message verification failed!\") return Response(status=status.HTTP_403_FORBIDDEN) #", "from rest_framework.response import Response from zenslackchat.message import handler from zenslackchat.models", "events to receive them. From https://api.slack.com/apps/<APP ID>/event-subscriptions you need to:", "failed!\") return Response(status=status.HTTP_403_FORBIDDEN) # verification challenge, convert to signature verification", "don't need to run a specifc bot process just to", "even if they cause me problems. If I don't #", "slack_message = request.data if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN: log.error(\"Slack message verification", "documentation for Slack events: https://api.slack.com/events-api The app needs to subscribe", "try: handler( event, our_channel=settings.SRE_SUPPORT_CHANNEL, slack_client=SlackApp.client(), zendesk_client=ZendeskApp.client(), workspace_uri=settings.SLACK_WORKSPACE_URI, zendesk_uri=settings.ZENDESK_TICKET_URI, user_id=settings.ZENDESK_USER_ID, group_id=settings.ZENDESK_GROUP_ID,", "needs to subscribe to events to receive them. 
From https://api.slack.com/apps/<APP", "if 'event' in slack_message: event = slack_message.get('event') if settings.DEBUG: log.debug(f'event", "= slack_message.get('event') if settings.DEBUG: log.debug(f'event received:\\n{pprint.pformat(event)}\\n') try: handler( event, our_channel=settings.SRE_SUPPORT_CHANNEL,", "# events will be sent. log.exception(\"Slack message_handler error: \") return", "Message on channels will now start being recieved. The bot", "accept the webhook will be marked as broken and then", "import APIView from rest_framework.response import Response from zenslackchat.message import handler", "# verification challenge, convert to signature verification instead: if slack_message.get('type')", "the RTM API. This is handy as i don't need", "bot process just to handle events. Instead I can just", "from zenslackchat.message import handler from zenslackchat.models import SlackApp from zenslackchat.models", "return Response(status=status.HTTP_403_FORBIDDEN) # verification challenge, convert to signature verification instead:", "API. This is handy as i don't need to run", "to run a specifc bot process just to handle events.", "I don't # accept the webhook will be marked as", "as i don't need to run a specifc bot process", "settings from rest_framework import status from rest_framework.views import APIView from", "to a channel first. \"\"\" def post(self, request, *args, **kwargs):", "post(self, request, *args, **kwargs): \"\"\"Events will come in over a", "Slack events: https://api.slack.com/events-api The app needs to subscribe to events", "don't # accept the webhook will be marked as broken", "import handler from zenslackchat.models import SlackApp from zenslackchat.models import ZendeskApp", "specifc bot process just to handle events. Instead I can", "to be invited to a channel first. 
\"\"\" def post(self,", "slack_client=SlackApp.client(), zendesk_client=ZendeskApp.client(), workspace_uri=settings.SLACK_WORKSPACE_URI, zendesk_uri=settings.ZENDESK_TICKET_URI, user_id=settings.ZENDESK_USER_ID, group_id=settings.ZENDESK_GROUP_ID, ) except: # noqa", "Enable Events from \"Off\" to \"On\" - Enter the \"Request", "rest_framework import status from rest_framework.views import APIView from rest_framework.response import", "zenslackchat.models import SlackApp from zenslackchat.models import ZendeskApp class Events(APIView): \"\"\"Handle", "logging from django.conf import settings from rest_framework import status from", "be invited to a channel first. \"\"\" def post(self, request,", "POST request. \"\"\" log = logging.getLogger(__name__) slack_message = request.data if", "\"Subscribe to events on behalf of users\" - Click \"Add", "me problems. If I don't # accept the webhook will", "RTM API. This is handy as i don't need to", "on behalf of users\" - Click \"Add Workspace Event\" and", "event, our_channel=settings.SRE_SUPPORT_CHANNEL, slack_client=SlackApp.client(), zendesk_client=ZendeskApp.client(), workspace_uri=settings.SLACK_WORKSPACE_URI, zendesk_uri=settings.ZENDESK_TICKET_URI, user_id=settings.ZENDESK_USER_ID, group_id=settings.ZENDESK_GROUP_ID, ) except:", "message verification failed!\") return Response(status=status.HTTP_403_FORBIDDEN) # verification challenge, convert to", "\"\"\"Handle Events using the webapp instead of using the RTM", "Instead I can just using the webapp REST API for", "\"\"\"Events will come in over a POST request. \"\"\" log", "come in over a POST request. \"\"\" log = logging.getLogger(__name__)", "\"message.channels\". Message on channels will now start being recieved. 
The", "settings.DEBUG: log.debug(f'event received:\\n{pprint.pformat(event)}\\n') try: handler( event, our_channel=settings.SRE_SUPPORT_CHANNEL, slack_client=SlackApp.client(), zendesk_client=ZendeskApp.client(), workspace_uri=settings.SLACK_WORKSPACE_URI,", "import pprint import logging from django.conf import settings from rest_framework", "Event\" and add \"message.channels\". Message on channels will now start", "https://api.slack.com/events-api The app needs to subscribe to events to receive", "users\" - Click \"Add Workspace Event\" and add \"message.channels\". Message", "log.debug(f'event received:\\n{pprint.pformat(event)}\\n') try: handler( event, our_channel=settings.SRE_SUPPORT_CHANNEL, slack_client=SlackApp.client(), zendesk_client=ZendeskApp.client(), workspace_uri=settings.SLACK_WORKSPACE_URI, zendesk_uri=settings.ZENDESK_TICKET_URI,", "except: # noqa # I want all event even if", "of using the RTM API. This is handy as i", "webhook will be marked as broken and then no more", "on channels will now start being recieved. The bot will", "events: https://api.slack.com/events-api The app needs to subscribe to events to", "http://<instance id>.ngrok.io/slack/events/ - Then \"Subscribe to events on behalf of", "The bot will need to be invited to a channel", "first. \"\"\" def post(self, request, *args, **kwargs): \"\"\"Events will come", "verification failed!\") return Response(status=status.HTTP_403_FORBIDDEN) # verification challenge, convert to signature", "!= settings.SLACK_VERIFICATION_TOKEN: log.error(\"Slack message verification failed!\") return Response(status=status.HTTP_403_FORBIDDEN) # verification", "workspace_uri=settings.SLACK_WORKSPACE_URI, zendesk_uri=settings.ZENDESK_TICKET_URI, user_id=settings.ZENDESK_USER_ID, group_id=settings.ZENDESK_GROUP_ID, ) except: # noqa # I", "for this. 
Handy documentation for Slack events: https://api.slack.com/events-api The app", "Events from \"Off\" to \"On\" - Enter the \"Request URL\"", "Response from zenslackchat.message import handler from zenslackchat.models import SlackApp from", "run a specifc bot process just to handle events. Instead", "to subscribe to events to receive them. From https://api.slack.com/apps/<APP ID>/event-subscriptions", "using the webapp REST API for this. Handy documentation for", "Enter the \"Request URL\" e.g.: http://<instance id>.ngrok.io/slack/events/ - Then \"Subscribe", "ZendeskApp class Events(APIView): \"\"\"Handle Events using the webapp instead of", "log.error(\"Slack message verification failed!\") return Response(status=status.HTTP_403_FORBIDDEN) # verification challenge, convert", "Response(data=slack_message, status=status.HTTP_200_OK) if 'event' in slack_message: event = slack_message.get('event') if", "being recieved. The bot will need to be invited to", "channels will now start being recieved. The bot will need", "slack_message.get('event') if settings.DEBUG: log.debug(f'event received:\\n{pprint.pformat(event)}\\n') try: handler( event, our_channel=settings.SRE_SUPPORT_CHANNEL, slack_client=SlackApp.client(),", "subscribe to events to receive them. From https://api.slack.com/apps/<APP ID>/event-subscriptions you", "rest_framework.views import APIView from rest_framework.response import Response from zenslackchat.message import", "to handle events. Instead I can just using the webapp", "problems. If I don't # accept the webhook will be", "Click \"Add Workspace Event\" and add \"message.channels\". Message on channels", "zenslackchat.message import handler from zenslackchat.models import SlackApp from zenslackchat.models import", "status=status.HTTP_200_OK) if 'event' in slack_message: event = slack_message.get('event') if settings.DEBUG:", "will need to be invited to a channel first. 
\"\"\"", "slack_message.get('type') == 'url_verification': return Response(data=slack_message, status=status.HTTP_200_OK) if 'event' in slack_message:", "handler( event, our_channel=settings.SRE_SUPPORT_CHANNEL, slack_client=SlackApp.client(), zendesk_client=ZendeskApp.client(), workspace_uri=settings.SLACK_WORKSPACE_URI, zendesk_uri=settings.ZENDESK_TICKET_URI, user_id=settings.ZENDESK_USER_ID, group_id=settings.ZENDESK_GROUP_ID, )", "import ZendeskApp class Events(APIView): \"\"\"Handle Events using the webapp instead", "add \"message.channels\". Message on channels will now start being recieved.", "Events using the webapp instead of using the RTM API.", "request.data if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN: log.error(\"Slack message verification failed!\") return", "if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN: log.error(\"Slack message verification failed!\") return Response(status=status.HTTP_403_FORBIDDEN)", "verification instead: if slack_message.get('type') == 'url_verification': return Response(data=slack_message, status=status.HTTP_200_OK) if", "can just using the webapp REST API for this. Handy", "for Slack events: https://api.slack.com/events-api The app needs to subscribe to", "received:\\n{pprint.pformat(event)}\\n') try: handler( event, our_channel=settings.SRE_SUPPORT_CHANNEL, slack_client=SlackApp.client(), zendesk_client=ZendeskApp.client(), workspace_uri=settings.SLACK_WORKSPACE_URI, zendesk_uri=settings.ZENDESK_TICKET_URI, user_id=settings.ZENDESK_USER_ID,", "the webapp REST API for this. Handy documentation for Slack", "import SlackApp from zenslackchat.models import ZendeskApp class Events(APIView): \"\"\"Handle Events", "i don't need to run a specifc bot process just", "event even if they cause me problems. If I don't", "need to be invited to a channel first. \"\"\" def", "rest_framework.response import Response from zenslackchat.message import handler from zenslackchat.models import" ]
[ "trace_isolated_cores(): \"\"\" Trace isolated_cores from Airship deployment :return: value traced", "of server hardware used in the role \"\"\" hardware_profile =", "PDF :return: isolated_cores value expected by the PDF \"\"\" worker_role", "2.0 (the \"License\"); # you may not use this file", "'5', 'get', 'Open_vSwitch', '.', 'other_config'] response = kube_exec(ovs_pod, cmd) #", "\"\"\" pdf = settings.getValue('pdf_file') cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio) def", "set2 def hex_to_comma_list(hex_mask): \"\"\" Converts CPU mask given in hex", "'case_name': 'vswitch_pmd_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if", "if 'dpdk-lcore-mask' in config: pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores =", "x.split(','): if '-' in part: a, b = part.split('-') a,", "pdf = settings.getValue('pdf_file') cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio) def get_role(role_name):", "role_details = role return role_details def get_platform_profile(profile_name): \"\"\" Searches and", "settings.getValue('pdf_file') filters = pdf['vim_functional']['scheduler_filters'] filters = filters.split(',') map(str.strip, filters) return", "\"\"\" Trace vnf_reserved_cores from Airship deployment :return: value traced from", "in hardware_profiles: if profile['profile_name'] == role['hardware_profile']: profile_details = profile return", "\"\"\" logger = logging.getLogger(__name__) traced_value = trace_nova_scheduler_filters() required_value = required_nova_scheduler_filters()", "= pdf['vim_functional']['scheduler_filters'] filters = filters.split(',') map(str.strip, filters) return filters def", "value traced from `isolcpus` key in `/proc/cmdline` \"\"\" pod =", "Role for worker nodes in PDF :return: vswitch_dpdk_lcores value expected", "import json import re import logging from tools.kube_utils 
import kube_exec,", "int(b) result.extend(range(a, b + 1)) elif part != '': a", "== role_name: role_details = role return role_details def get_platform_profile(profile_name): \"\"\"", "trace_vswitch_pmd_cores() vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() non_os_cores = [] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores))", "using ovs-vsctl \"\"\" ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t',", "\"\"\" pdf = settings.getValue('pdf_file') filters = pdf['vim_functional']['scheduler_filters'] filters = filters.split(',')", "of actual deployment \"\"\" try: config = get_nova_conf() vcpu_pin_set =", "vswitch_pmd_cores_check(): \"\"\" vswitch_pmd_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_vswitch_pmd_cores()", "def required_nova_scheduler_filters(): \"\"\" Required nova scheduler_filters by the PDF \"\"\"", "set2 = set(convert_range_to_list(range2)) return set1 == set2 def are_lists_equal(list1, list2):", "roles = settings.getValue('pdf_file')['roles'] for role in roles: if role['name'] ==", "list of cores \"\"\" binary = bin(int(hex_mask, 16))[2:] reversed_binary =", "= {'category': 'compute', 'case_name': 'cpu_allocation_ratio_check', 'details': {'traced_ratio': traced_value, 'required_ratio': required_value", "for option in proc_cmd.split(): if 'isolcpus' in option: _, isolcpus_value", "License for the specific language governing permissions and # limitations", "'' filters = filters.split(',') map(str.strip, filters) return filters def required_nova_scheduler_filters():", "store_result(logger, result) return result def nova_scheduler_filters_check(): \"\"\" nova_scheduler_filters_check \"\"\" logger", "'vswitch_dpdk_lcores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if 
is_ranges_equals(traced_value,", "def required_isolated_cores(): \"\"\" Returns value of `isolated_cpus` from platform_profile used", "= get_nova_conf() filters = config.get('filter_scheduler', 'enabled_filters') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): filters", "role \"\"\" role = get_role(role_name) profile = get_platform_profile(role['platform_profile']) return profile", "== profile_name: profile_details = profile return profile_details def get_processor_profile(profile_name): \"\"\"", "by the PDF \"\"\" pdf = settings.getValue('pdf_file') cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio']", "',' i = i + 1 return output[:-1] def comma_list_to_hex(cpus):", "'/etc/nova/nova.conf'] response = kube_exec(pod, cmd) config = configparser.ConfigParser() config.read_string(response) return", "} if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] =", "import settings from internal import store_result ########### # Checks ###########", "def get_cores_by_role(role_name): \"\"\" Returns cpu cores list of server hardware", "traced_value, 'required_ratio': required_value } } if traced_value == required_value: result['criteria']", "hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores = '' return pmd_cores def required_vswitch_dpdk_lcores(): \"\"\"", "= re.findall(\"[a-zA-Z0-9-]+=\", response) for key in match: response = response.replace(key,", "= trace_reserved_vnf_cores() required_value = required_reserved_vnf_cores() result = {'category': 'compute', 'case_name':", "role = get_role(role_name) hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] for profile in hardware_profiles:", "hardware used in the role \"\"\" hardware_profile = get_hardware_profile_by_role(role_name) processor_profile", "traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] =", "= 'fail' store_result(logger, 
result) return result ############### # helper functions", "result['criteria'] = 'fail' store_result(logger, result) return result def os_reserved_cores_check(): \"\"\"", "= settings.getValue('WORKER_ROLE_NAME') all_cores = get_cores_by_role(worker_role) reserved_vnf_cores = trace_reserved_vnf_cores() vswitch_pmd_cores =", "details of a role \"\"\" role = get_role(role_name) profile =", "delimiter :param key_value_str: example string `someKey=somevalue` :param delimiter: default delimiter", "\"\"\" Trace vswitch_pmd_cores from Airship deployment :return: value traced from", "vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set = ''", "['cat', '/etc/nova/nova.conf'] response = kube_exec(pod, cmd) config = configparser.ConfigParser() config.read_string(response)", "return pmd_cores def required_vswitch_pmd_cores(): \"\"\" Returns value of vswitch_pmd_cores from", "\"\"\" Trace isolated_cores from Airship deployment :return: value traced from", "16))[2:] reversed_binary = binary[::-1] i = 0 output = \"\"", "[] for numa in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus def get_nova_conf():", "of cpu cores in corresponding hex value of cpu-mask \"\"\"", "OF ANY KIND, either express or implied. 
# See the", "See the License for the specific language governing permissions and", "int(part) result.append(a) # remove duplicates result = list(dict.fromkeys(result)) return result", "\"\"\" Searches and returns processor_profile with `profile_name` \"\"\" processor_profiles =", "= profile return profile_details def get_processor_profile(profile_name): \"\"\" Searches and returns", "to in writing, software # distributed under the License is", "openvswitchdb using ovs-vsctl \"\"\" ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl',", "proc_cmd = kube_exec(pod, cmd) for option in proc_cmd.split(): if 'isolcpus'", "of vswitch_pmd_cores from platform_profile used by Role for worker nodes", "worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores():", "= logging.getLogger(__name__) traced_value = trace_nova_scheduler_filters() required_value = required_nova_scheduler_filters() result =", "!= '': a = int(part) result.append(a) # remove duplicates result", "or agreed to in writing, software # distributed under the", "Airship deployment :return: value traced from `other_config:dpdk-lcore-mask` in openvswitchdb using", "\"\"\" Returns value of vswitch_pmd_cores from platform_profile used by Role", "role \"\"\" role = get_role(role_name) hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] for profile", "`cpu_allocation_ratio` key in nova.conf of actual deployment \"\"\" try: config", "import re import logging from tools.kube_utils import kube_exec, get_pod_with_labels from", "\"\"\" os_reserved_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_os_reserved_cores() required_value", "cpu_allocation_ratio by the PDF \"\"\" pdf = settings.getValue('pdf_file') cpu_allocation_ratio =", "value traced from `other_config:pmd-cpu-mask` in openvswitchdb using ovs-vsctl 
\"\"\" ovs_pod", "logging.getLogger(__name__) traced_value = trace_os_reserved_cores() required_value = required_os_reserved_cores() result = {'category':", "PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['os_reserved_cores']", "non_os_cores = [] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores = set(all_cores).difference(set(non_os_cores)) #", "compliance with the License. # You may obtain a copy", "get_platform_profile_by_role(worker_role) return profile['isolated_cpus'] def trace_reserved_vnf_cores(): \"\"\" Trace vnf_reserved_cores from Airship", "PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores']", "if traced_value == required_value: result['criteria'] = 'pass' else: result['criteria'] =", "duplicates result = list(dict.fromkeys(result)) return result def is_ranges_equals(range1, range2): \"\"\"", "\"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vnf_cores'] def", "= settings.getValue('pdf_file') cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio) def get_role(role_name): \"\"\"", "governing permissions and # limitations under the License. 
\"\"\" Compute", "required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else:", "hex to list of cores \"\"\" binary = bin(int(hex_mask, 16))[2:]", "not use this file except in compliance with the License.", "pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/proc/cmdline'] proc_cmd = kube_exec(pod,", "logger = logging.getLogger(__name__) traced_value = trace_isolated_cores() required_value = required_isolated_cores() result", "try: config = get_nova_conf() filters = config.get('filter_scheduler', 'enabled_filters') except (configparser.NoOptionError,", "else: result['criteria'] = 'fail' store_result(logger, result) return result ############### #", "binary_mask = binary_mask | (1 << int(cpu)) return format(binary_mask, '02x')", "PDF \"\"\" pdf = settings.getValue('pdf_file') cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio)", "you may not use this file except in compliance with", "PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['isolated_cpus']", "cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] response", "return float(cpu_allocation_ratio) def get_role(role_name): \"\"\" Searches and returns role with", "Airship deployment os_reserved_cores = all_cores - (reserved_vnf_cores + vswitch_pmd_cores +", "[ key, value] \"\"\" key, value = key_value_str.split(delimiter) key =", "= json.loads(response) if 'dpdk-lcore-mask' in config: pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask']) else:", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "\"\"\" isolated_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_isolated_cores() required_value", "trace_vswitch_dpdk_lcores() non_os_cores = [] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) 
non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores = set(all_cores).difference(set(non_os_cores))", "store_result(logger, result) return result def cpu_allocation_ratio_check(): \"\"\" cpu_allocation_ratio_check \"\"\" logger", "\"\"\" # pylint: disable=C0103 result = [] for part in", "profile['vnf_cores'] def trace_vswitch_pmd_cores(): \"\"\" Trace vswitch_pmd_cores from Airship deployment :return:", "= get_cores_by_role(worker_role) reserved_vnf_cores = trace_reserved_vnf_cores() vswitch_pmd_cores = trace_vswitch_pmd_cores() vswitch_dpdk_lcores =", "`other_config:pmd-cpu-mask` in openvswitchdb using ovs-vsctl \"\"\" ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd", "config.get('filter_scheduler', 'enabled_filters') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): filters = '' filters =", "Searches and returns platform_profile with `profile_name` \"\"\" platform_profiles = settings.getValue('pdf_file')['platform_profiles']", "output[:-1] def comma_list_to_hex(cpus): \"\"\" Converts a list of cpu cores", "traced_value = trace_vswitch_dpdk_lcores() required_value = required_vswitch_dpdk_lcores() result = {'category': 'compute',", "return profile['isolated_cpus'] def trace_reserved_vnf_cores(): \"\"\" Trace vnf_reserved_cores from Airship deployment", "= ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] response =", "= get_processor_profile(processor_profile) cpus = [] for numa in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set']))", "in corresponding hex value of cpu-mask \"\"\" cpu_arr = cpus.split(\",\")", "profile = get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores(): \"\"\" Trace vswitch_dpdk_lcores", "'fail' store_result(logger, result) return result def os_reserved_cores_check(): \"\"\" 
os_reserved_cores_check \"\"\"", "except (configparser.NoOptionError, configparser.MissingSectionHeaderError): filters = '' filters = filters.split(',') map(str.strip,", "= response.replace(key[1:], '\"' + key[1:] + '\"') config = json.loads(response)", "trace_reserved_vnf_cores() vswitch_pmd_cores = trace_vswitch_pmd_cores() vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() non_os_cores = []", "= get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.',", "\"\"\" processor_profiles = settings.getValue('pdf_file')['processor_profiles'] for profile in processor_profiles: if profile['profile_name']", "def get_platform_profile(profile_name): \"\"\" Searches and returns platform_profile with `profile_name` \"\"\"", "'\"') config = json.loads(response) if 'pmd-cpu-mask' in config: pmd_cores =", "processor_profile = hardware_profile['profile_info']['processor_profile'] profile = get_processor_profile(processor_profile) cpus = [] for", "result def nova_scheduler_filters_check(): \"\"\" nova_scheduler_filters_check \"\"\" logger = logging.getLogger(__name__) traced_value", "trace_os_reserved_cores() required_value = required_os_reserved_cores() result = {'category': 'compute', 'case_name': 'os_reserved_cores_check',", "re import logging from tools.kube_utils import kube_exec, get_pod_with_labels from tools.conf", "response = kube_exec(pod, cmd) config = configparser.ConfigParser() config.read_string(response) return config", "required_vswitch_pmd_cores(): \"\"\" Returns value of vswitch_pmd_cores from platform_profile used by", "CPU mask given in hex to list of cores \"\"\"", "0 output = \"\" for bit in reversed_binary: if bit", "def is_ranges_equals(range1, range2): \"\"\" Checks whether two ranges passed as", "worker nodes in PDF :return: vswitch_dpdk_lcores value expected by the", "\"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) 
return profile['os_reserved_cores'] def", "logging from tools.kube_utils import kube_exec, get_pod_with_labels from tools.conf import settings", "trace_reserved_vnf_cores() required_value = required_reserved_vnf_cores() result = {'category': 'compute', 'case_name': 'reserved_vnf_cores_check',", "return result def is_ranges_equals(range1, range2): \"\"\" Checks whether two ranges", "\"\"\" set1 = set(convert_range_to_list(range1)) set2 = set(convert_range_to_list(range2)) return set1 ==", "= int(part) result.append(a) # remove duplicates result = list(dict.fromkeys(result)) return", "by the PDF \"\"\" pdf = settings.getValue('pdf_file') filters = pdf['vim_functional']['scheduler_filters']", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "'required_filters': required_value } } if are_lists_equal(traced_value, required_value): result['criteria'] = 'pass'", "+ '\"') config = json.loads(response) if 'pmd-cpu-mask' in config: pmd_cores", "logging.getLogger(__name__) traced_value = trace_cpu_allocation_ratio() required_value = required_cpu_allocation_ratio() result = {'category':", "profile_name: profile_details = profile return profile_details def get_processor_profile(profile_name): \"\"\" Searches", "= int(a), int(b) result.extend(range(a, b + 1)) elif part !=", "convert_range_to_list(x): \"\"\" Returns list of numbers from given range as", "= logging.getLogger(__name__) traced_value = trace_isolated_cores() required_value = required_isolated_cores() result =", "\"\"\" role = get_role(role_name) hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] for profile in", "\"\"\" Returns cpu cores list of server hardware used in", "= set(list1) set2 = set(list2) return set1 == set2 def", "file except in compliance with the License. 
# You may", "try: config = get_nova_conf() cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio') except (configparser.NoOptionError,", "get_nova_conf(): \"\"\" Returns parsed nova.conf \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd", "result['criteria'] = 'fail' store_result(logger, result) return result def vswitch_pmd_cores_check(): \"\"\"", "[3, 4, 5] \"\"\" # pylint: disable=C0103 result = []", "get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores(): \"\"\" Trace vswitch_dpdk_lcores from Airship", "non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores = set(all_cores).difference(set(non_os_cores)) # return as string with", "PDF :return: vnf_reserverd_core value expected by the PDF \"\"\" worker_role", "from Airship deployment :return: value traced from `other_config:pmd-cpu-mask` in openvswitchdb", ":return: isolated_cores value expected by the PDF \"\"\" worker_role =", "in hex to list of cores \"\"\" binary = bin(int(hex_mask,", "<gh_stars>1-10 # Copyright 2020 University Of Delhi. # # Licensed", "= bin(int(hex_mask, 16))[2:] reversed_binary = binary[::-1] i = 0 output", "traced from `vcpu_pin_set` key in nova.conf of actual deployment \"\"\"", "traced_value = trace_cpu_allocation_ratio() required_value = required_cpu_allocation_ratio() result = {'category': 'compute',", "in PDF :return: vnf_reserverd_core value expected by the PDF \"\"\"", "2020 University Of Delhi. 
# # Licensed under the Apache", "= trace_os_reserved_cores() required_value = required_os_reserved_cores() result = {'category': 'compute', 'case_name':", "vswitch_pmd_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_vswitch_pmd_cores() required_value =", "cores \"\"\" binary = bin(int(hex_mask, 16))[2:] reversed_binary = binary[::-1] i", "= trace_vswitch_dpdk_lcores() required_value = required_vswitch_dpdk_lcores() result = {'category': 'compute', 'case_name':", "KIND, either express or implied. # See the License for", "role_name: role_details = role return role_details def get_platform_profile(profile_name): \"\"\" Searches", "'nova_scheduler_filters_check', 'details': {'traced_filters': traced_value, 'required_filters': required_value } } if are_lists_equal(traced_value,", "\"\" for bit in reversed_binary: if bit == '1': output", "permissions and # limitations under the License. \"\"\" Compute Related", "profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus def get_nova_conf(): \"\"\" Returns parsed nova.conf", "profile = get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores(): \"\"\" Trace os_reserved_cores", "cpus def get_nova_conf(): \"\"\" Returns parsed nova.conf \"\"\" pod =", "given string into key and value based on delimiter :param", "cpus = [] for numa in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus", "profile in platform_profiles: if profile['profile_name'] == profile_name: profile_details = profile", "(the \"License\"); # you may not use this file except", "config = get_nova_conf() filters = config.get('filter_scheduler', 'enabled_filters') except (configparser.NoOptionError, configparser.MissingSectionHeaderError):", "= all_cores - (reserved_vnf_cores + vswitch_pmd_cores + vswitch_dpdk_lcores) \"\"\" worker_role", "in roles: if role['name'] == role_name: role_details = role 
return", "} } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria']", "and returns processor_profile with `profile_name` \"\"\" processor_profiles = settings.getValue('pdf_file')['processor_profiles'] for", "= get_hardware_profile_by_role(role_name) processor_profile = hardware_profile['profile_info']['processor_profile'] profile = get_processor_profile(processor_profile) cpus =", "import configparser import json import re import logging from tools.kube_utils", ":return: value traced from `other_config:dpdk-lcore-mask` in openvswitchdb using ovs-vsctl \"\"\"", "cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio = ''", "+ key[1:] + '\"') config = json.loads(response) if 'dpdk-lcore-mask' in", "'\"') config = json.loads(response) if 'dpdk-lcore-mask' in config: pmd_cores =", "# # Unless required by applicable law or agreed to", "cpu cores related helper function def convert_range_to_list(x): \"\"\" Returns list", "vswitch_pmd_cores = trace_vswitch_pmd_cores() vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() non_os_cores = [] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores))", "result['criteria'] = 'fail' store_result(logger, result) return result def nova_scheduler_filters_check(): \"\"\"", "Checks whether two list are identicals \"\"\" set1 = set(list1)", "result) return result def reserved_vnf_cores_check(): \"\"\" reserved_vnf_cores_check \"\"\" logger =", "hex_to_comma_list(hex_mask): \"\"\" Converts CPU mask given in hex to list", "= settings.getValue('pdf_file')['hardware_profiles'] for profile in hardware_profiles: if profile['profile_name'] == role['hardware_profile']:", "implied. 
# See the License for the specific language governing", "nodes in PDF :return: os_reserved_cores value expected by the PDF", "response = kube_exec(ovs_pod, cmd) # convert config str to json", "\"\"\" Compute Related Checks \"\"\" import configparser import json import", "= 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result", "get_role(role_name) profile = get_platform_profile(role['platform_profile']) return profile def get_hardware_profile_by_role(role_name): \"\"\" Returns", "scheduler_filters by the PDF \"\"\" pdf = settings.getValue('pdf_file') filters =", "'case_name': 'cpu_allocation_ratio_check', 'details': {'traced_ratio': traced_value, 'required_ratio': required_value } } if", "= ['cat', '/proc/cmdline'] proc_cmd = kube_exec(pod, cmd) for option in", "value = key_value_str.split(delimiter) key = key.strip() value = value.strip() return", "traced_value = trace_reserved_vnf_cores() required_value = required_reserved_vnf_cores() result = {'category': 'compute',", "= '' return pmd_cores def required_vswitch_dpdk_lcores(): \"\"\" Returns value of", "for worker nodes in PDF :return: vswitch_dpdk_lcores value expected by", "\"\"\" Checks whether two list are identicals \"\"\" set1 =", "all_cores = get_cores_by_role(worker_role) reserved_vnf_cores = trace_reserved_vnf_cores() vswitch_pmd_cores = trace_vswitch_pmd_cores() vswitch_dpdk_lcores", "hardware_profile = get_hardware_profile_by_role(role_name) processor_profile = hardware_profile['profile_info']['processor_profile'] profile = get_processor_profile(processor_profile) cpus", "settings from internal import store_result ########### # Checks ########### def", "= logging.getLogger(__name__) traced_value = trace_vswitch_pmd_cores() required_value = required_vswitch_pmd_cores() result =", "= get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/proc/cmdline'] proc_cmd = kube_exec(pod, cmd)", "reserved_vnf_cores_check \"\"\" logger = logging.getLogger(__name__) 
traced_value = trace_reserved_vnf_cores() required_value =", "== set2 def are_lists_equal(list1, list2): \"\"\" Checks whether two list", "binary_mask = 0 for cpu in cpu_arr: binary_mask = binary_mask", "in option: _, isolcpus_value = split_key_value(option) break return isolcpus_value def", "vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() non_os_cores = [] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores", "traced_value = trace_vswitch_pmd_cores() required_value = required_vswitch_pmd_cores() result = {'category': 'compute',", "of numbers from given range as string e.g.: convert_range_to_list('3-5') will", "== '1': output = output + str(i) + ',' i", "'fail' store_result(logger, result) return result def vswitch_pmd_cores_check(): \"\"\" vswitch_pmd_cores_check \"\"\"", "used by Role for worker nodes in PDF :return: vswitch_dpdk_lcores", "settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['isolated_cpus'] def trace_reserved_vnf_cores(): \"\"\" Trace", "Unless required by applicable law or agreed to in writing,", "Returns hardware profile details of a role \"\"\" role =", "import logging from tools.kube_utils import kube_exec, get_pod_with_labels from tools.conf import", "float(cpu_allocation_ratio) def required_cpu_allocation_ratio(): \"\"\" Required cpu_allocation_ratio by the PDF \"\"\"", "filters = filters.split(',') map(str.strip, filters) return filters def trace_cpu_allocation_ratio(): \"\"\"", "the specific language governing permissions and # limitations under the", "config.read_string(response) return config ### cpu cores related helper function def", "return result ############### # helper functions ############### def trace_isolated_cores(): \"\"\"", "config = get_nova_conf() vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set') except 
(configparser.NoOptionError, configparser.MissingSectionHeaderError):", "def get_platform_profile_by_role(role_name): \"\"\" Returns platform profile details of a role", "deployment \"\"\" try: config = get_nova_conf() cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio')", "response = response.replace(key[1:], '\"' + key[1:] + '\"') config =", "PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vnf_cores']", "vswitch_pmd_cores from platform_profile used by Role for worker nodes in", "from Airship deployment :return: value traced from `vcpu_pin_set` key in", "def trace_vswitch_dpdk_lcores(): \"\"\" Trace vswitch_dpdk_lcores from Airship deployment :return: value", "Returns list of numbers from given range as string e.g.:", "required_cpu_allocation_ratio(): \"\"\" Required cpu_allocation_ratio by the PDF \"\"\" pdf =", "reserved_vnf_cores = trace_reserved_vnf_cores() vswitch_pmd_cores = trace_vswitch_pmd_cores() vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() non_os_cores", "a role \"\"\" role = get_role(role_name) hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] for", "to list of cores \"\"\" binary = bin(int(hex_mask, 16))[2:] reversed_binary", "return ','.join(map(str, list(os_reserved_cores))) def required_os_reserved_cores(): \"\"\" Returns value of os_reserved_cores", "are_lists_equal(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger,", "required_value = required_reserved_vnf_cores() result = {'category': 'compute', 'case_name': 'reserved_vnf_cores_check', 'details':", "from Airship deployment :return: value traced from `isolcpus` key in", "set1 = set(list1) set2 = set(list2) return set1 == set2", "platform_profile used by Role for worker nodes in PDF :return:", "= {'category': 'compute', 'case_name': 'reserved_vnf_cores_check', 'details': {'traced_cores': traced_value, 
'required_cores': required_value", "try: config = get_nova_conf() vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set') except (configparser.NoOptionError,", "binary_mask | (1 << int(cpu)) return format(binary_mask, '02x') def split_key_value(key_value_str,", "\"\"\" Returns list of numbers from given range as string", "configparser.MissingSectionHeaderError): cpu_allocation_ratio = '' return float(cpu_allocation_ratio) def required_cpu_allocation_ratio(): \"\"\" Required", "in config: pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores = '' return", "get_processor_profile(profile_name): \"\"\" Searches and returns processor_profile with `profile_name` \"\"\" processor_profiles", "profile['os_reserved_cores'] def trace_nova_scheduler_filters(): \"\"\" Trace scheduler_filters from Airship deployment :return:", "Delhi. # # Licensed under the Apache License, Version 2.0", "result['criteria'] = 'fail' store_result(logger, result) return result def vswitch_dpdk_lcores_check(): \"\"\"", "required_vswitch_pmd_cores() result = {'category': 'compute', 'case_name': 'vswitch_pmd_cores_check', 'details': {'traced_cores': traced_value,", "profile in hardware_profiles: if profile['profile_name'] == role['hardware_profile']: profile_details = profile", "pdf['vim_functional']['scheduler_filters'] filters = filters.split(',') map(str.strip, filters) return filters def trace_cpu_allocation_ratio():", "trace_os_reserved_cores(): \"\"\" Trace os_reserved_cores from Airship deployment os_reserved_cores = all_cores", "'case_name': 'vswitch_dpdk_lcores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if", "\"\"\" cpu_allocation_ratio_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_cpu_allocation_ratio() required_value", "'compute', 'case_name': 'vswitch_dpdk_lcores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } }", ":return: [ key, value] \"\"\" key, value = 
key_value_str.split(delimiter) key", "for profile in platform_profiles: if profile['profile_name'] == profile_name: profile_details =", "for worker nodes in PDF :return: os_reserved_cores value expected by", "result = {'category': 'compute', 'case_name': 'reserved_vnf_cores_check', 'details': {'traced_cores': traced_value, 'required_cores':", "= get_platform_profile_by_role(worker_role) return profile['vnf_cores'] def trace_vswitch_pmd_cores(): \"\"\" Trace vswitch_pmd_cores from", "ranges passed as string are equal e.g.: is_ranges_equals('2-5', '2-4,5') returns", "key in match: response = response.replace(key[1:], '\"' + key[1:] +", "def trace_reserved_vnf_cores(): \"\"\" Trace vnf_reserved_cores from Airship deployment :return: value", "['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] response = kube_exec(ovs_pod,", "returns processor_profile with `profile_name` \"\"\" processor_profiles = settings.getValue('pdf_file')['processor_profiles'] for profile", ":return: vnf_reserverd_core value expected by the PDF \"\"\" worker_role =", "= [] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores = set(all_cores).difference(set(non_os_cores)) # return", "= split_key_value(option) break return isolcpus_value def required_isolated_cores(): \"\"\" Returns value", "role['hardware_profile']: profile_details = profile return profile_details def get_cores_by_role(role_name): \"\"\" Returns", "re.findall(\":[a-zA-Z0-9-]+\", response) for key in match: response = response.replace(key[1:], '\"'", "range2): \"\"\" Checks whether two ranges passed as string are", "pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores = '' return pmd_cores def", "match = re.findall(\":[a-zA-Z0-9-]+\", response) for key in match: response =", "settings.getValue('WORKER_ROLE_NAME') all_cores = 
get_cores_by_role(worker_role) reserved_vnf_cores = trace_reserved_vnf_cores() vswitch_pmd_cores = trace_vswitch_pmd_cores()", "'.', 'other_config'] response = kube_exec(ovs_pod, cmd) # convert config str", "<< int(cpu)) return format(binary_mask, '02x') def split_key_value(key_value_str, delimiter='='): \"\"\" splits", "roles: if role['name'] == role_name: role_details = role return role_details", "b = part.split('-') a, b = int(a), int(b) result.extend(range(a, b", "profile_details = profile return profile_details def get_platform_profile_by_role(role_name): \"\"\" Returns platform", "You may obtain a copy of the License at #", "def os_reserved_cores_check(): \"\"\" os_reserved_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value =", "cores in corresponding hex value of cpu-mask \"\"\" cpu_arr =", "string `someKey=somevalue` :param delimiter: default delimiter is `=` :return: [", "'required_ratio': required_value } } if traced_value == required_value: result['criteria'] =", "except (configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set = '' return vcpu_pin_set def required_reserved_vnf_cores():", "else: pmd_cores = '' return pmd_cores def required_vswitch_dpdk_lcores(): \"\"\" Returns", "def trace_cpu_allocation_ratio(): \"\"\" Trace cpu_allocation_ratio from Airship deployment :return: value", "Trace vnf_reserved_cores from Airship deployment :return: value traced from `vcpu_pin_set`", "= config.get('DEFAULT', 'cpu_allocation_ratio') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio = '' return", "= 'fail' store_result(logger, result) return result def vswitch_pmd_cores_check(): \"\"\" vswitch_pmd_cores_check", "'dpdk-lcore-mask' in config: pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores = ''", "value of os_reserved_cores from platform_profile used by Role for worker", "settings.getValue('pdf_file')['hardware_profiles'] for profile in 
hardware_profiles: if profile['profile_name'] == role['hardware_profile']: profile_details", "isolated_cores value expected by the PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME')", "get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']", "[] for part in x.split(','): if '-' in part: a,", "= required_reserved_vnf_cores() result = {'category': 'compute', 'case_name': 'reserved_vnf_cores_check', 'details': {'traced_cores':", "= cpus.split(\",\") binary_mask = 0 for cpu in cpu_arr: binary_mask", "= required_nova_scheduler_filters() result = {'category': 'compute', 'case_name': 'nova_scheduler_filters_check', 'details': {'traced_filters':", "+ '\":') match = re.findall(\":[a-zA-Z0-9-]+\", response) for key in match:", ":return: value traced from `cpu_allocation_ratio` key in nova.conf of actual", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "two ranges passed as string are equal e.g.: is_ranges_equals('2-5', '2-4,5')", "filters.split(',') map(str.strip, filters) return filters def required_nova_scheduler_filters(): \"\"\" Required nova", "result['criteria'] = 'fail' store_result(logger, result) return result def cpu_allocation_ratio_check(): \"\"\"", "os_reserved_cores_check(): \"\"\" os_reserved_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_os_reserved_cores()", "for profile in processor_profiles: if profile['profile_name'] == profile_name: profile_details =", "= trace_nova_scheduler_filters() required_value = required_nova_scheduler_filters() result = {'category': 'compute', 'case_name':", "part.split('-') a, b = int(a), int(b) result.extend(range(a, b + 1))", "split_key_value(key_value_str, delimiter='='): \"\"\" splits given string into key and value", ":return: os_reserved_cores value expected by the PDF \"\"\" worker_role =", "\"\"\" nova_scheduler_filters_check \"\"\" logger = 
logging.getLogger(__name__) traced_value = trace_nova_scheduler_filters() required_value", "= 0 output = \"\" for bit in reversed_binary: if", "worker nodes in PDF :return: isolated_cores value expected by the", "def split_key_value(key_value_str, delimiter='='): \"\"\" splits given string into key and", "by Role for worker nodes in PDF :return: vswitch_dpdk_lcores value", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "config = json.loads(response) if 'pmd-cpu-mask' in config: pmd_cores = hex_to_comma_list(config['pmd-cpu-mask'])", "def trace_nova_scheduler_filters(): \"\"\" Trace scheduler_filters from Airship deployment :return: value", "License. # You may obtain a copy of the License", "# Copyright 2020 University Of Delhi. # # Licensed under", "{'category': 'compute', 'case_name': 'os_reserved_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value }", "configparser.MissingSectionHeaderError): vcpu_pin_set = '' return vcpu_pin_set def required_reserved_vnf_cores(): \"\"\" Returns", "else: result['criteria'] = 'fail' store_result(logger, result) return result def vswitch_dpdk_lcores_check():", "with `role_name` \"\"\" roles = settings.getValue('pdf_file')['roles'] for role in roles:", "config: pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores = '' return pmd_cores", "tools.kube_utils import kube_exec, get_pod_with_labels from tools.conf import settings from internal", "platform profile details of a role \"\"\" role = get_role(role_name)", "{'traced_filters': traced_value, 'required_filters': required_value } } if are_lists_equal(traced_value, required_value): result['criteria']", "= required_vswitch_pmd_cores() result = {'category': 'compute', 'case_name': 'vswitch_pmd_cores_check', 'details': {'traced_cores':", "required_nova_scheduler_filters(): \"\"\" Required nova scheduler_filters by the PDF \"\"\" pdf", "`other_config:dpdk-lcore-mask` in openvswitchdb using 
ovs-vsctl \"\"\" ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd", "deployment \"\"\" try: config = get_nova_conf() vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set')", "required_value = required_vswitch_pmd_cores() result = {'category': 'compute', 'case_name': 'vswitch_pmd_cores_check', 'details':", "nodes in PDF :return: vnf_reserverd_core value expected by the PDF", "def required_reserved_vnf_cores(): \"\"\" Returns value of vnf_cores from platform_profile used", "== set2 def hex_to_comma_list(hex_mask): \"\"\" Converts CPU mask given in", "and value based on delimiter :param key_value_str: example string `someKey=somevalue`", "worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['os_reserved_cores'] def trace_nova_scheduler_filters():", "required_reserved_vnf_cores() result = {'category': 'compute', 'case_name': 'reserved_vnf_cores_check', 'details': {'traced_cores': traced_value,", "Trace vswitch_dpdk_lcores from Airship deployment :return: value traced from `other_config:dpdk-lcore-mask`", "return float(cpu_allocation_ratio) def required_cpu_allocation_ratio(): \"\"\" Required cpu_allocation_ratio by the PDF", "is_ranges_equals(range1, range2): \"\"\" Checks whether two ranges passed as string", "def required_cpu_allocation_ratio(): \"\"\" Required cpu_allocation_ratio by the PDF \"\"\" pdf", "nodes in PDF :return: vswitch_pmd_cores value expected by the PDF", "return profile def get_hardware_profile_by_role(role_name): \"\"\" Returns hardware profile details of", "= get_role(role_name) hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] for profile in hardware_profiles: if", "helper function def convert_range_to_list(x): \"\"\" Returns list of numbers from", "default delimiter is `=` :return: [ key, value] \"\"\" key,", "required_reserved_vnf_cores(): \"\"\" Returns value of vnf_cores from platform_profile used by", "= 
filters.split(',') map(str.strip, filters) return filters def trace_cpu_allocation_ratio(): \"\"\" Trace", "result def cpu_allocation_ratio_check(): \"\"\" cpu_allocation_ratio_check \"\"\" logger = logging.getLogger(__name__) traced_value", "'' return pmd_cores def required_vswitch_dpdk_lcores(): \"\"\" Returns value of vswitch_dpdk_lcores", "# return as string with comma separated value return ','.join(map(str,", "'': a = int(part) result.append(a) # remove duplicates result =", "else: pmd_cores = '' return pmd_cores def required_vswitch_pmd_cores(): \"\"\" Returns", "used by Role for worker nodes in PDF :return: isolated_cores", "Role for worker nodes in PDF :return: os_reserved_cores value expected", "result) return result def nova_scheduler_filters_check(): \"\"\" nova_scheduler_filters_check \"\"\" logger =", "\"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/proc/cmdline'] proc_cmd =", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "deployment :return: value traced from `other_config:dpdk-lcore-mask` in openvswitchdb using ovs-vsctl", "= 'fail' store_result(logger, result) return result def vswitch_dpdk_lcores_check(): \"\"\" vswitch_dpdk_lcores_check", ":return: vswitch_pmd_cores value expected by the PDF \"\"\" worker_role =", "cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio) def get_role(role_name): \"\"\" Searches and", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "for bit in reversed_binary: if bit == '1': output =", "reversed_binary = binary[::-1] i = 0 output = \"\" for", "profile = get_platform_profile_by_role(worker_role) return profile['vnf_cores'] def trace_vswitch_pmd_cores(): \"\"\" Trace vswitch_pmd_cores", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either", "`role_name` \"\"\" roles = settings.getValue('pdf_file')['roles'] for role in roles: if", "Trace isolated_cores from Airship deployment :return: value traced from `isolcpus`", "string into key and value based on delimiter :param key_value_str:", "'fail' store_result(logger, result) return result def nova_scheduler_filters_check(): \"\"\" nova_scheduler_filters_check \"\"\"", "agreed to in writing, software # distributed under the License", "distributed under the License is distributed on an \"AS IS\"", "get_nova_conf() cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio =", "delimiter: default delimiter is `=` :return: [ key, value] \"\"\"", "function def convert_range_to_list(x): \"\"\" Returns list of numbers from given", "cmd = ['cat', '/etc/nova/nova.conf'] response = kube_exec(pod, cmd) config =", "returns platform_profile with `profile_name` \"\"\" platform_profiles = settings.getValue('pdf_file')['platform_profiles'] for profile", "= get_role(role_name) profile = get_platform_profile(role['platform_profile']) return profile def get_hardware_profile_by_role(role_name): \"\"\"", "result def is_ranges_equals(range1, range2): \"\"\" Checks whether two ranges passed", "processor_profile with `profile_name` \"\"\" processor_profiles = settings.getValue('pdf_file')['processor_profiles'] for profile in", "result = {'category': 'compute', 'case_name': 'os_reserved_cores_check', 'details': {'traced_cores': traced_value, 'required_cores':", "for cpu in cpu_arr: binary_mask = binary_mask | (1 <<", "# Checks ########### def isolated_cores_check(): \"\"\" isolated_cores_check \"\"\" logger =", "= get_nova_conf() cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio", "else: result['criteria'] = 'fail' store_result(logger, result) 
return result def os_reserved_cores_check():", "str(i) + ',' i = i + 1 return output[:-1]", "by the PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role)", "in PDF :return: vswitch_dpdk_lcores value expected by the PDF \"\"\"", "related helper function def convert_range_to_list(x): \"\"\" Returns list of numbers", "return profile_details def get_processor_profile(profile_name): \"\"\" Searches and returns processor_profile with", ":return: value traced from `isolcpus` key in `/proc/cmdline` \"\"\" pod", "required_isolated_cores() result = {'category': 'compute', 'case_name': 'isolated_cores_check', 'details': {'traced_cores': traced_value,", "\"\"\" Returns value of vnf_cores from platform_profile used by Role", "= logging.getLogger(__name__) traced_value = trace_reserved_vnf_cores() required_value = required_reserved_vnf_cores() result =", "whether two ranges passed as string are equal e.g.: is_ranges_equals('2-5',", "return profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores(): \"\"\" Trace vswitch_dpdk_lcores from Airship deployment", "settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vnf_cores'] def trace_vswitch_pmd_cores(): \"\"\" Trace", "nova.conf of actual deployment \"\"\" try: config = get_nova_conf() cpu_allocation_ratio", "match: response = response.replace(key, '\"' + key[:-1] + '\":') match", "platform_profile with `profile_name` \"\"\" platform_profiles = settings.getValue('pdf_file')['platform_profiles'] for profile in", "########### # Checks ########### def isolated_cores_check(): \"\"\" isolated_cores_check \"\"\" logger", "= [] for part in x.split(','): if '-' in part:", "pylint: disable=C0103 result = [] for part in x.split(','): if", "return cpus def get_nova_conf(): \"\"\" Returns parsed nova.conf \"\"\" pod", "= get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/etc/nova/nova.conf'] response = 
kube_exec(pod, cmd)", "cpu in cpu_arr: binary_mask = binary_mask | (1 << int(cpu))", "{'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria']", "result.extend(range(a, b + 1)) elif part != '': a =", "non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores = set(all_cores).difference(set(non_os_cores)) # return as string", "b + 1)) elif part != '': a = int(part)", "import kube_exec, get_pod_with_labels from tools.conf import settings from internal import", "hardware profile details of a role \"\"\" role = get_role(role_name)", "traced from `cpu_allocation_ratio` key in nova.conf of actual deployment \"\"\"", "isolcpus_value = split_key_value(option) break return isolcpus_value def required_isolated_cores(): \"\"\" Returns", "profile in processor_profiles: if profile['profile_name'] == profile_name: profile_details = profile", "by Role for worker nodes in PDF :return: isolated_cores value", "based on delimiter :param key_value_str: example string `someKey=somevalue` :param delimiter:", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "the License is distributed on an \"AS IS\" BASIS, #", "trace_nova_scheduler_filters() required_value = required_nova_scheduler_filters() result = {'category': 'compute', 'case_name': 'nova_scheduler_filters_check',", "nodes in PDF :return: isolated_cores value expected by the PDF", "else: result['criteria'] = 'fail' store_result(logger, result) return result def cpu_allocation_ratio_check():", "Checks ########### def isolated_cores_check(): \"\"\" isolated_cores_check \"\"\" logger = logging.getLogger(__name__)", "mask given in hex to list of cores \"\"\" binary", "value of vswitch_pmd_cores from platform_profile used by Role for worker", "config = json.loads(response) if 'dpdk-lcore-mask' in config: pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask'])", "split_key_value(option) break return isolcpus_value def required_isolated_cores(): \"\"\" Returns value of", "law or agreed to in writing, software # distributed under", "Airship deployment :return: value traced from `enabled_filters` key in nova.conf", "'compute', 'case_name': 'nova_scheduler_filters_check', 'details': {'traced_filters': traced_value, 'required_filters': required_value } }", "'case_name': 'reserved_vnf_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if", "deployment :return: value traced from `other_config:pmd-cpu-mask` in openvswitchdb using ovs-vsctl", "############### # helper functions ############### def trace_isolated_cores(): \"\"\" Trace isolated_cores", "worker nodes in PDF :return: vnf_reserverd_core value expected by the", "Checks whether two ranges passed as string are equal e.g.:", "configparser import json import re import logging from tools.kube_utils import", "PDF :return: vswitch_pmd_cores value expected by the PDF \"\"\" worker_role", "may obtain a copy of the License at # #", "binary[::-1] i = 0 output = \"\" for bit in", "config = get_nova_conf() cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio') except 
(configparser.NoOptionError, configparser.MissingSectionHeaderError):", "filters = '' filters = filters.split(',') map(str.strip, filters) return filters", "may not use this file except in compliance with the", "is `=` :return: [ key, value] \"\"\" key, value =", "get_role(role_name) hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] for profile in hardware_profiles: if profile['profile_name']", "from tools.conf import settings from internal import store_result ########### #", "Related Checks \"\"\" import configparser import json import re import", "this file except in compliance with the License. # You", "\"\"\" vswitch_pmd_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_vswitch_pmd_cores() required_value", "is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger,", "= \"\" for bit in reversed_binary: if bit == '1':", "= role return role_details def get_platform_profile(profile_name): \"\"\" Searches and returns", "# # Licensed under the Apache License, Version 2.0 (the", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "if are_lists_equal(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail'", "Airship deployment :return: value traced from `vcpu_pin_set` key in nova.conf", "= hardware_profile['profile_info']['processor_profile'] profile = get_processor_profile(processor_profile) cpus = [] for numa", "value expected by the PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile", "\"\"\" binary = bin(int(hex_mask, 16))[2:] reversed_binary = binary[::-1] i =", "'\"' + key[1:] + '\"') config = json.loads(response) if 'pmd-cpu-mask'", "\"\"\" Trace scheduler_filters from Airship deployment :return: value traced from", "return format(binary_mask, '02x') def split_key_value(key_value_str, delimiter='='): \"\"\" splits given string", "# convert config str to json str match = 
re.findall(\"[a-zA-Z0-9-]+=\",", "actual deployment \"\"\" try: config = get_nova_conf() cpu_allocation_ratio = config.get('DEFAULT',", "splits given string into key and value based on delimiter", "'\"' + key[1:] + '\"') config = json.loads(response) if 'dpdk-lcore-mask'", "settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores(): \"\"\" Trace", "vswitch_dpdk_lcores from platform_profile used by Role for worker nodes in", "'details': {'traced_filters': traced_value, 'required_filters': required_value } } if are_lists_equal(traced_value, required_value):", "required_vswitch_dpdk_lcores(): \"\"\" Returns value of vswitch_dpdk_lcores from platform_profile used by", "profile['profile_name'] == role['hardware_profile']: profile_details = profile return profile_details def get_cores_by_role(role_name):", "'cpu_allocation_ratio_check', 'details': {'traced_ratio': traced_value, 'required_ratio': required_value } } if traced_value", "Returns value of os_reserved_cores from platform_profile used by Role for", "########### def isolated_cores_check(): \"\"\" isolated_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value", "result['criteria'] = 'fail' store_result(logger, result) return result def reserved_vnf_cores_check(): \"\"\"", "role_details def get_platform_profile(profile_name): \"\"\" Searches and returns platform_profile with `profile_name`", "for key in match: response = response.replace(key[1:], '\"' + key[1:]", "def required_vswitch_dpdk_lcores(): \"\"\" Returns value of vswitch_dpdk_lcores from platform_profile used", "= {'category': 'compute', 'case_name': 'nova_scheduler_filters_check', 'details': {'traced_filters': traced_value, 'required_filters': required_value", "from internal import store_result ########### # Checks ########### def isolated_cores_check():", "or implied. 
# See the License for the specific language", "result) return result ############### # helper functions ############### def trace_isolated_cores():", "proc_cmd.split(): if 'isolcpus' in option: _, isolcpus_value = split_key_value(option) break", "of cores \"\"\" binary = bin(int(hex_mask, 16))[2:] reversed_binary = binary[::-1]", "range as string e.g.: convert_range_to_list('3-5') will give [3, 4, 5]", "nova scheduler_filters by the PDF \"\"\" pdf = settings.getValue('pdf_file') filters", "vswitch_dpdk_lcores value expected by the PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME')", "get_cores_by_role(worker_role) reserved_vnf_cores = trace_reserved_vnf_cores() vswitch_pmd_cores = trace_vswitch_pmd_cores() vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores()", "'' return pmd_cores def required_vswitch_pmd_cores(): \"\"\" Returns value of vswitch_pmd_cores", "PDF \"\"\" pdf = settings.getValue('pdf_file') filters = pdf['vim_functional']['scheduler_filters'] filters =", "= kube_exec(pod, cmd) for option in proc_cmd.split(): if 'isolcpus' in", "identicals \"\"\" set1 = set(list1) set2 = set(list2) return set1", "= config.get('DEFAULT', 'vcpu_pin_set') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set = '' return", "\"\"\" logger = logging.getLogger(__name__) traced_value = trace_isolated_cores() required_value = required_isolated_cores()", "os_reserved_cores = all_cores - (reserved_vnf_cores + vswitch_pmd_cores + vswitch_dpdk_lcores) \"\"\"", "str match = re.findall(\"[a-zA-Z0-9-]+=\", response) for key in match: response", "\"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['isolated_cpus'] def", "nova.conf of actual deployment \"\"\" try: config = get_nova_conf() filters", "logging.getLogger(__name__) traced_value = trace_isolated_cores() required_value = required_isolated_cores() result = {'category':", "'/proc/cmdline'] proc_cmd = 
kube_exec(pod, cmd) for option in proc_cmd.split(): if", "pmd_cores def required_vswitch_pmd_cores(): \"\"\" Returns value of vswitch_pmd_cores from platform_profile", "University Of Delhi. # # Licensed under the Apache License,", "filters.split(',') map(str.strip, filters) return filters def trace_cpu_allocation_ratio(): \"\"\" Trace cpu_allocation_ratio", "'vswitch_pmd_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value,", "corresponding hex value of cpu-mask \"\"\" cpu_arr = cpus.split(\",\") binary_mask", "json.loads(response) if 'pmd-cpu-mask' in config: pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores", "used by Role for worker nodes in PDF :return: vswitch_pmd_cores", "from `other_config:pmd-cpu-mask` in openvswitchdb using ovs-vsctl \"\"\" ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')", "profile_details = profile return profile_details def get_cores_by_role(role_name): \"\"\" Returns cpu", "4, 5] \"\"\" # pylint: disable=C0103 result = [] for", "\"\"\" vswitch_dpdk_lcores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_vswitch_dpdk_lcores() required_value", "required_os_reserved_cores() result = {'category': 'compute', 'case_name': 'os_reserved_cores_check', 'details': {'traced_cores': traced_value,", "\"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores'] def", "in PDF :return: isolated_cores value expected by the PDF \"\"\"", "### cpu cores related helper function def convert_range_to_list(x): \"\"\" Returns", "= {'category': 'compute', 'case_name': 'vswitch_pmd_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value", "string e.g.: convert_range_to_list('3-5') will give [3, 4, 5] \"\"\" #", "json.loads(response) if 'dpdk-lcore-mask' in config: pmd_cores = 
hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores", "} if are_lists_equal(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] =", "cmd) for option in proc_cmd.split(): if 'isolcpus' in option: _,", "trace_vswitch_dpdk_lcores(): \"\"\" Trace vswitch_dpdk_lcores from Airship deployment :return: value traced", "`isolcpus` key in `/proc/cmdline` \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd =", "profile = get_platform_profile_by_role(worker_role) return profile['isolated_cpus'] def trace_reserved_vnf_cores(): \"\"\" Trace vnf_reserved_cores", "os_reserved_cores from platform_profile used by Role for worker nodes in", "= hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores = '' return pmd_cores def required_vswitch_pmd_cores():", "str to json str match = re.findall(\"[a-zA-Z0-9-]+=\", response) for key", "def convert_range_to_list(x): \"\"\" Returns list of numbers from given range", "\"\"\" ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t', '5', 'get',", "= '' return pmd_cores def required_vswitch_pmd_cores(): \"\"\" Returns value of", "server hardware used in the role \"\"\" hardware_profile = get_hardware_profile_by_role(role_name)", "of actual deployment \"\"\" try: config = get_nova_conf() filters =", "logger = logging.getLogger(__name__) traced_value = trace_os_reserved_cores() required_value = required_os_reserved_cores() result", "if 'pmd-cpu-mask' in config: pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores =", "+ vswitch_dpdk_lcores) \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') all_cores = get_cores_by_role(worker_role) reserved_vnf_cores", "in processor_profiles: if profile['profile_name'] == profile_name: profile_details = profile return", "config ### cpu cores related helper function def convert_range_to_list(x): \"\"\"", "store_result(logger, result) return result def 
os_reserved_cores_check(): \"\"\" os_reserved_cores_check \"\"\" logger", "result) return result def vswitch_pmd_cores_check(): \"\"\" vswitch_pmd_cores_check \"\"\" logger =", "string are equal e.g.: is_ranges_equals('2-5', '2-4,5') returns true \"\"\" set1", "= required_os_reserved_cores() result = {'category': 'compute', 'case_name': 'os_reserved_cores_check', 'details': {'traced_cores':", "map(str.strip, filters) return filters def required_nova_scheduler_filters(): \"\"\" Required nova scheduler_filters", "worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores():", "'reserved_vnf_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value,", "in writing, software # distributed under the License is distributed", "result = {'category': 'compute', 'case_name': 'nova_scheduler_filters_check', 'details': {'traced_filters': traced_value, 'required_filters':", "nova.conf of actual deployment \"\"\" try: config = get_nova_conf() vcpu_pin_set", "pmd_cores = '' return pmd_cores def required_vswitch_pmd_cores(): \"\"\" Returns value", "value of `isolated_cpus` from platform_profile used by Role for worker", "nova_scheduler_filters_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_nova_scheduler_filters() required_value =", "vswitch_pmd_cores value expected by the PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME')", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "= settings.getValue('pdf_file') filters = pdf['vim_functional']['scheduler_filters'] filters = filters.split(',') map(str.strip, filters)", "key[1:] + '\"') config = json.loads(response) if 'dpdk-lcore-mask' in config:", "\"\"\" Required cpu_allocation_ratio by the PDF \"\"\" pdf = settings.getValue('pdf_file')", "profile_name: 
profile_details = profile return profile_details def get_platform_profile_by_role(role_name): \"\"\" Returns", "profile details of a role \"\"\" role = get_role(role_name) profile", "required_value = required_cpu_allocation_ratio() result = {'category': 'compute', 'case_name': 'cpu_allocation_ratio_check', 'details':", "get_platform_profile(profile_name): \"\"\" Searches and returns platform_profile with `profile_name` \"\"\" platform_profiles", "as string with comma separated value return ','.join(map(str, list(os_reserved_cores))) def", "cmd) # convert config str to json str match =", "def comma_list_to_hex(cpus): \"\"\" Converts a list of cpu cores in", "for profile in hardware_profiles: if profile['profile_name'] == role['hardware_profile']: profile_details =", "from `isolcpus` key in `/proc/cmdline` \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd", "Returns value of vnf_cores from platform_profile used by Role for", "vswitch_dpdk_lcores from Airship deployment :return: value traced from `other_config:dpdk-lcore-mask` in", "the License for the specific language governing permissions and #", "filters = pdf['vim_functional']['scheduler_filters'] filters = filters.split(',') map(str.strip, filters) return filters", "+ str(i) + ',' i = i + 1 return", "used by Role for worker nodes in PDF :return: os_reserved_cores", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "logging.getLogger(__name__) traced_value = trace_vswitch_pmd_cores() required_value = required_vswitch_pmd_cores() result = {'category':", "i = i + 1 return output[:-1] def comma_list_to_hex(cpus): \"\"\"", "Role for worker nodes in PDF :return: vnf_reserverd_core value expected", "delimiter='='): \"\"\" splits given string into key and value based", "filters def trace_cpu_allocation_ratio(): \"\"\" Trace cpu_allocation_ratio from Airship deployment :return:", "= required_vswitch_dpdk_lcores() result = {'category': 'compute', 'case_name': 
'vswitch_dpdk_lcores_check', 'details': {'traced_cores':", "= ['cat', '/etc/nova/nova.conf'] response = kube_exec(pod, cmd) config = configparser.ConfigParser()", "are_lists_equal(list1, list2): \"\"\" Checks whether two list are identicals \"\"\"", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "list are identicals \"\"\" set1 = set(list1) set2 = set(list2)", "1 return output[:-1] def comma_list_to_hex(cpus): \"\"\" Converts a list of", "from `vcpu_pin_set` key in nova.conf of actual deployment \"\"\" try:", "required_value = required_nova_scheduler_filters() result = {'category': 'compute', 'case_name': 'nova_scheduler_filters_check', 'details':", "output = output + str(i) + ',' i = i", "vnf_reserverd_core value expected by the PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME')", "# distributed under the License is distributed on an \"AS", "key in match: response = response.replace(key, '\"' + key[:-1] +", "= configparser.ConfigParser() config.read_string(response) return config ### cpu cores related helper", "# Unless required by applicable law or agreed to in", "get_role(role_name): \"\"\" Searches and returns role with `role_name` \"\"\" roles", "(configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio = '' return float(cpu_allocation_ratio) def required_cpu_allocation_ratio(): \"\"\"", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "Returns value of vswitch_dpdk_lcores from platform_profile used by Role for", "logging.getLogger(__name__) traced_value = trace_vswitch_dpdk_lcores() required_value = required_vswitch_dpdk_lcores() result = {'category':", "'Open_vSwitch', '.', 'other_config'] response = kube_exec(ovs_pod, cmd) # convert config", "isolated_cores from Airship deployment :return: value traced from `isolcpus` key", "deployment os_reserved_cores = all_cores - (reserved_vnf_cores + vswitch_pmd_cores + vswitch_dpdk_lcores)", "Checks \"\"\" import configparser 
import json import re import logging", "os_reserved_cores from Airship deployment os_reserved_cores = all_cores - (reserved_vnf_cores +", "to json str match = re.findall(\"[a-zA-Z0-9-]+=\", response) for key in", "of vnf_cores from platform_profile used by Role for worker nodes", "\"\"\" Trace os_reserved_cores from Airship deployment os_reserved_cores = all_cores -", "required_value = required_isolated_cores() result = {'category': 'compute', 'case_name': 'isolated_cores_check', 'details':", "+ 1)) elif part != '': a = int(part) result.append(a)", "the Apache License, Version 2.0 (the \"License\"); # you may", "'compute', 'case_name': 'vswitch_pmd_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } }", "cpu-mask \"\"\" cpu_arr = cpus.split(\",\") binary_mask = 0 for cpu", "return result def reserved_vnf_cores_check(): \"\"\" reserved_vnf_cores_check \"\"\" logger = logging.getLogger(__name__)", "1)) elif part != '': a = int(part) result.append(a) #", "return config ### cpu cores related helper function def convert_range_to_list(x):", "cpu_allocation_ratio_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_cpu_allocation_ratio() required_value =", "\"\"\" Returns parsed nova.conf \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd =", "get_nova_conf() vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set =", "Returns parsed nova.conf \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat',", "bin(int(hex_mask, 16))[2:] reversed_binary = binary[::-1] i = 0 output =", "\"\"\" set1 = set(list1) set2 = set(list2) return set1 ==", "in `/proc/cmdline` \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/proc/cmdline']", "configparser.ConfigParser() config.read_string(response) return config ### cpu cores related helper function", "\"\"\" 
worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores'] def", "comma separated value return ','.join(map(str, list(os_reserved_cores))) def required_os_reserved_cores(): \"\"\" Returns", "e.g.: is_ranges_equals('2-5', '2-4,5') returns true \"\"\" set1 = set(convert_range_to_list(range1)) set2", "by Role for worker nodes in PDF :return: vswitch_pmd_cores value", "= output + str(i) + ',' i = i +", "except (configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio = '' return float(cpu_allocation_ratio) def required_cpu_allocation_ratio():", "= [] for numa in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus def", "set1 == set2 def hex_to_comma_list(hex_mask): \"\"\" Converts CPU mask given", "processor_profiles: if profile['profile_name'] == profile_name: profile_details = profile return profile_details", "required_vswitch_dpdk_lcores() result = {'category': 'compute', 'case_name': 'vswitch_dpdk_lcores_check', 'details': {'traced_cores': traced_value,", "\"\"\" platform_profiles = settings.getValue('pdf_file')['platform_profiles'] for profile in platform_profiles: if profile['profile_name']", "\"\"\" Returns value of `isolated_cpus` from platform_profile used by Role", "def reserved_vnf_cores_check(): \"\"\" reserved_vnf_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value =", "| (1 << int(cpu)) return format(binary_mask, '02x') def split_key_value(key_value_str, delimiter='='):", "response) for key in match: response = response.replace(key, '\"' +", "value traced from `other_config:dpdk-lcore-mask` in openvswitchdb using ovs-vsctl \"\"\" ovs_pod", "under the License is distributed on an \"AS IS\" BASIS,", "ovs-vsctl \"\"\" ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t', '5',", "from `other_config:dpdk-lcore-mask` in 
openvswitchdb using ovs-vsctl \"\"\" ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')", "def vswitch_dpdk_lcores_check(): \"\"\" vswitch_dpdk_lcores_check \"\"\" logger = logging.getLogger(__name__) traced_value =", "store_result(logger, result) return result def reserved_vnf_cores_check(): \"\"\" reserved_vnf_cores_check \"\"\" logger", "config.get('DEFAULT', 'cpu_allocation_ratio') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio = '' return float(cpu_allocation_ratio)", "two list are identicals \"\"\" set1 = set(list1) set2 =", "\"\"\" import configparser import json import re import logging from", "scheduler_filters from Airship deployment :return: value traced from `enabled_filters` key", "into key and value based on delimiter :param key_value_str: example", "'pmd-cpu-mask' in config: pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores = ''", "and returns platform_profile with `profile_name` \"\"\" platform_profiles = settings.getValue('pdf_file')['platform_profiles'] for", "= 0 for cpu in cpu_arr: binary_mask = binary_mask |", "example string `someKey=somevalue` :param delimiter: default delimiter is `=` :return:", "value of vswitch_dpdk_lcores from platform_profile used by Role for worker", "result = {'category': 'compute', 'case_name': 'isolated_cores_check', 'details': {'traced_cores': traced_value, 'required_cores':", "{'category': 'compute', 'case_name': 'nova_scheduler_filters_check', 'details': {'traced_filters': traced_value, 'required_filters': required_value }", "get_platform_profile_by_role(role_name): \"\"\" Returns platform profile details of a role \"\"\"", "platform_profiles = settings.getValue('pdf_file')['platform_profiles'] for profile in platform_profiles: if profile['profile_name'] ==", "return profile['os_reserved_cores'] def trace_nova_scheduler_filters(): \"\"\" Trace scheduler_filters from Airship deployment", "== 
role['hardware_profile']: profile_details = profile return profile_details def get_cores_by_role(role_name): \"\"\"", "Returns cpu cores list of server hardware used in the", "result def reserved_vnf_cores_check(): \"\"\" reserved_vnf_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value", "'isolated_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value,", ":param key_value_str: example string `someKey=somevalue` :param delimiter: default delimiter is", "return profile['vnf_cores'] def trace_vswitch_pmd_cores(): \"\"\" Trace vswitch_pmd_cores from Airship deployment", "settings.getValue('pdf_file')['platform_profiles'] for profile in platform_profiles: if profile['profile_name'] == profile_name: profile_details", "result = {'category': 'compute', 'case_name': 'vswitch_pmd_cores_check', 'details': {'traced_cores': traced_value, 'required_cores':", "by Role for worker nodes in PDF :return: vnf_reserverd_core value", "= kube_exec(ovs_pod, cmd) # convert config str to json str", "'fail' store_result(logger, result) return result def reserved_vnf_cores_check(): \"\"\" reserved_vnf_cores_check \"\"\"", "from platform_profile used by Role for worker nodes in PDF", "list2): \"\"\" Checks whether two list are identicals \"\"\" set1", "def required_os_reserved_cores(): \"\"\" Returns value of os_reserved_cores from platform_profile used", "\"\"\" try: config = get_nova_conf() cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio') except", "= i + 1 return output[:-1] def comma_list_to_hex(cpus): \"\"\" Converts", "import store_result ########### # Checks ########### def isolated_cores_check(): \"\"\" isolated_cores_check", "settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores(): \"\"\" Trace", "ANY KIND, either express or implied. 
# See the License", "filters) return filters def trace_cpu_allocation_ratio(): \"\"\" Trace cpu_allocation_ratio from Airship", "\"\"\" Converts a list of cpu cores in corresponding hex", "expected by the PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile =", "the License. # You may obtain a copy of the", "output + str(i) + ',' i = i + 1", "# See the License for the specific language governing permissions", "in reversed_binary: if bit == '1': output = output +", "PDF :return: os_reserved_cores value expected by the PDF \"\"\" worker_role", "in PDF :return: vswitch_pmd_cores value expected by the PDF \"\"\"", "profile = get_processor_profile(processor_profile) cpus = [] for numa in profile['profile_info']['numas']:", "if 'isolcpus' in option: _, isolcpus_value = split_key_value(option) break return", "= settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['os_reserved_cores'] def trace_nova_scheduler_filters(): \"\"\"", "'-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] response = kube_exec(ovs_pod, cmd)", "pdf = settings.getValue('pdf_file') filters = pdf['vim_functional']['scheduler_filters'] filters = filters.split(',') map(str.strip,", "vswitch_pmd_cores + vswitch_dpdk_lcores) \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') all_cores = get_cores_by_role(worker_role)", "returns true \"\"\" set1 = set(convert_range_to_list(range1)) set2 = set(convert_range_to_list(range2)) return", "= logging.getLogger(__name__) traced_value = trace_cpu_allocation_ratio() required_value = required_cpu_allocation_ratio() result =", "result['criteria'] = 'fail' store_result(logger, result) return result ############### # helper", "= profile return profile_details def get_cores_by_role(role_name): \"\"\" Returns cpu cores", "= 'fail' store_result(logger, result) return result def reserved_vnf_cores_check(): \"\"\" reserved_vnf_cores_check", "\"\"\" Returns value of os_reserved_cores from 
platform_profile used by Role", "of vswitch_dpdk_lcores from platform_profile used by Role for worker nodes", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "value] \"\"\" key, value = key_value_str.split(delimiter) key = key.strip() value", "pmd_cores def required_vswitch_dpdk_lcores(): \"\"\" Returns value of vswitch_dpdk_lcores from platform_profile", "key in `/proc/cmdline` \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat',", "writing, software # distributed under the License is distributed on", "i = 0 output = \"\" for bit in reversed_binary:", "isolated_cores_check(): \"\"\" isolated_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_isolated_cores()", "= 'fail' store_result(logger, result) return result def os_reserved_cores_check(): \"\"\" os_reserved_cores_check", "deployment :return: value traced from `cpu_allocation_ratio` key in nova.conf of", "of cpu-mask \"\"\" cpu_arr = cpus.split(\",\") binary_mask = 0 for", "actual deployment \"\"\" try: config = get_nova_conf() vcpu_pin_set = config.get('DEFAULT',", "traced from `other_config:pmd-cpu-mask` in openvswitchdb using ovs-vsctl \"\"\" ovs_pod =", "['cat', '/proc/cmdline'] proc_cmd = kube_exec(pod, cmd) for option in proc_cmd.split():", "PDF :return: vswitch_dpdk_lcores value expected by the PDF \"\"\" worker_role", "limitations under the License. 
\"\"\" Compute Related Checks \"\"\" import", "'case_name': 'isolated_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if", "processor_profiles = settings.getValue('pdf_file')['processor_profiles'] for profile in processor_profiles: if profile['profile_name'] ==", "\"\"\" logger = logging.getLogger(__name__) traced_value = trace_os_reserved_cores() required_value = required_os_reserved_cores()", "'case_name': 'os_reserved_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if", "get_pod_with_labels from tools.conf import settings from internal import store_result ###########", "def nova_scheduler_filters_check(): \"\"\" nova_scheduler_filters_check \"\"\" logger = logging.getLogger(__name__) traced_value =", "cmd) config = configparser.ConfigParser() config.read_string(response) return config ### cpu cores", "get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/etc/nova/nova.conf'] response = kube_exec(pod, cmd) config", "true \"\"\" set1 = set(convert_range_to_list(range1)) set2 = set(convert_range_to_list(range2)) return set1", "vswitch_dpdk_lcores_check(): \"\"\" vswitch_dpdk_lcores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_vswitch_dpdk_lcores()", "{'category': 'compute', 'case_name': 'isolated_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value }", ":return: value traced from `other_config:pmd-cpu-mask` in openvswitchdb using ovs-vsctl \"\"\"", "'-' in part: a, b = part.split('-') a, b =", "filters) return filters def required_nova_scheduler_filters(): \"\"\" Required nova scheduler_filters by", "from `enabled_filters` key in nova.conf of actual deployment \"\"\" try:", "trace_reserved_vnf_cores(): \"\"\" Trace vnf_reserved_cores from Airship deployment :return: value traced", "set2 = set(list2) return set1 == set2 def hex_to_comma_list(hex_mask): \"\"\"", "= 
settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores(): \"\"\"", "'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result ###############", "profile['profile_name'] == profile_name: profile_details = profile return profile_details def get_processor_profile(profile_name):", "filters def required_nova_scheduler_filters(): \"\"\" Required nova scheduler_filters by the PDF", "for role in roles: if role['name'] == role_name: role_details =", "return profile_details def get_platform_profile_by_role(role_name): \"\"\" Returns platform profile details of", "output = \"\" for bit in reversed_binary: if bit ==", "language governing permissions and # limitations under the License. \"\"\"", "config = configparser.ConfigParser() config.read_string(response) return config ### cpu cores related", "vswitch_dpdk_lcores) \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') all_cores = get_cores_by_role(worker_role) reserved_vnf_cores =", "traced from `other_config:dpdk-lcore-mask` in openvswitchdb using ovs-vsctl \"\"\" ovs_pod =", "traced_value = trace_isolated_cores() required_value = required_isolated_cores() result = {'category': 'compute',", "\"\"\" Searches and returns role with `role_name` \"\"\" roles =", "from Airship deployment os_reserved_cores = all_cores - (reserved_vnf_cores + vswitch_pmd_cores", "Copyright 2020 University Of Delhi. 
# # Licensed under the", "in config: pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores = '' return", "trace_isolated_cores() required_value = required_isolated_cores() result = {'category': 'compute', 'case_name': 'isolated_cores_check',", "get_cores_by_role(role_name): \"\"\" Returns cpu cores list of server hardware used", "Role for worker nodes in PDF :return: isolated_cores value expected", "response.replace(key, '\"' + key[:-1] + '\":') match = re.findall(\":[a-zA-Z0-9-]+\", response)", "and returns role with `role_name` \"\"\" roles = settings.getValue('pdf_file')['roles'] for", "'compute', 'case_name': 'cpu_allocation_ratio_check', 'details': {'traced_ratio': traced_value, 'required_ratio': required_value } }", "= filters.split(',') map(str.strip, filters) return filters def required_nova_scheduler_filters(): \"\"\" Required", "Airship deployment :return: value traced from `cpu_allocation_ratio` key in nova.conf", "option in proc_cmd.split(): if 'isolcpus' in option: _, isolcpus_value =", "profile def get_hardware_profile_by_role(role_name): \"\"\" Returns hardware profile details of a", "nodes in PDF :return: vswitch_dpdk_lcores value expected by the PDF", "in cpu_arr: binary_mask = binary_mask | (1 << int(cpu)) return", "= logging.getLogger(__name__) traced_value = trace_vswitch_dpdk_lcores() required_value = required_vswitch_dpdk_lcores() result =", "parsed nova.conf \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/etc/nova/nova.conf']", "required_value = required_os_reserved_cores() result = {'category': 'compute', 'case_name': 'os_reserved_cores_check', 'details':", "return pmd_cores def required_vswitch_dpdk_lcores(): \"\"\" Returns value of vswitch_dpdk_lcores from", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "numa in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus def get_nova_conf(): \"\"\" Returns", "for key in match: 
response = response.replace(key, '\"' + key[:-1]", "platform_profiles: if profile['profile_name'] == profile_name: profile_details = profile return profile_details", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores(): \"\"\" Trace vswitch_dpdk_lcores from Airship deployment :return:", "= trace_reserved_vnf_cores() vswitch_pmd_cores = trace_vswitch_pmd_cores() vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() non_os_cores =", "\"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/etc/nova/nova.conf'] response =", "a, b = part.split('-') a, b = int(a), int(b) result.extend(range(a,", "isolated_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_isolated_cores() required_value =", "bit == '1': output = output + str(i) + ','", "= key_value_str.split(delimiter) key = key.strip() value = value.strip() return key,", "get_hardware_profile_by_role(role_name): \"\"\" Returns hardware profile details of a role \"\"\"", "if profile['profile_name'] == role['hardware_profile']: profile_details = profile return profile_details def", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "`enabled_filters` key in nova.conf of actual deployment \"\"\" try: config", "set(convert_range_to_list(range1)) set2 = set(convert_range_to_list(range2)) return set1 == set2 def are_lists_equal(list1,", "binary = bin(int(hex_mask, 16))[2:] reversed_binary = binary[::-1] i = 0", "Trace cpu_allocation_ratio from Airship deployment :return: value traced from `cpu_allocation_ratio`", "list(os_reserved_cores))) def required_os_reserved_cores(): \"\"\" Returns value of os_reserved_cores from platform_profile", "License. 
\"\"\" Compute Related Checks \"\"\" import configparser import json", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "result) return result def os_reserved_cores_check(): \"\"\" os_reserved_cores_check \"\"\" logger =", "set1 = set(convert_range_to_list(range1)) set2 = set(convert_range_to_list(range2)) return set1 == set2", "'1': output = output + str(i) + ',' i =", "part != '': a = int(part) result.append(a) # remove duplicates", "(reserved_vnf_cores + vswitch_pmd_cores + vswitch_dpdk_lcores) \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') all_cores", "filters = filters.split(',') map(str.strip, filters) return filters def required_nova_scheduler_filters(): \"\"\"", "specific language governing permissions and # limitations under the License.", "profile return profile_details def get_platform_profile_by_role(role_name): \"\"\" Returns platform profile details", "key and value based on delimiter :param key_value_str: example string", "required_value } } if are_lists_equal(traced_value, required_value): result['criteria'] = 'pass' else:", "return result def vswitch_dpdk_lcores_check(): \"\"\" vswitch_dpdk_lcores_check \"\"\" logger = logging.getLogger(__name__)", "value traced from `cpu_allocation_ratio` key in nova.conf of actual deployment", "os_reserved_cores value expected by the PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME')", "+ 1 return output[:-1] def comma_list_to_hex(cpus): \"\"\" Converts a list", "nova.conf \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/etc/nova/nova.conf'] response", "# remove duplicates result = list(dict.fromkeys(result)) return result def is_ranges_equals(range1,", "# you may not use this file except in compliance", "in platform_profiles: if profile['profile_name'] == profile_name: profile_details = profile return", "= set(list2) return set1 == set2 def hex_to_comma_list(hex_mask): \"\"\" Converts", "\"\"\" 
reserved_vnf_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_reserved_vnf_cores() required_value", "vcpu_pin_set = '' return vcpu_pin_set def required_reserved_vnf_cores(): \"\"\" Returns value", "in nova.conf of actual deployment \"\"\" try: config = get_nova_conf()", "profile return profile_details def get_processor_profile(profile_name): \"\"\" Searches and returns processor_profile", "= response.replace(key, '\"' + key[:-1] + '\":') match = re.findall(\":[a-zA-Z0-9-]+\",", "= trace_vswitch_pmd_cores() required_value = required_vswitch_pmd_cores() result = {'category': 'compute', 'case_name':", "pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio) def get_role(role_name): \"\"\" Searches and returns role", "traced from `isolcpus` key in `/proc/cmdline` \"\"\" pod = get_pod_with_labels('application=nova,component=compute')", "of actual deployment \"\"\" try: config = get_nova_conf() cpu_allocation_ratio =", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "= set(convert_range_to_list(range1)) set2 = set(convert_range_to_list(range2)) return set1 == set2 def", "return profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores(): \"\"\" Trace os_reserved_cores from Airship deployment", "{'category': 'compute', 'case_name': 'vswitch_dpdk_lcores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value }", "= settings.getValue('pdf_file')['roles'] for role in roles: if role['name'] == role_name:", "= '' filters = filters.split(',') map(str.strip, filters) return filters def", "give [3, 4, 5] \"\"\" # pylint: disable=C0103 result =", "result = {'category': 'compute', 'case_name': 'vswitch_dpdk_lcores_check', 'details': {'traced_cores': traced_value, 'required_cores':", "Returns platform profile details of a role \"\"\" role =", "cpu cores list of server hardware used in the role", "under the Apache License, Version 2.0 (the \"License\"); # you", "part: a, 
b = part.split('-') a, b = int(a), int(b)", "Compute Related Checks \"\"\" import configparser import json import re", "filters = config.get('filter_scheduler', 'enabled_filters') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): filters = ''", "key, value = key_value_str.split(delimiter) key = key.strip() value = value.strip()", "whether two list are identicals \"\"\" set1 = set(list1) set2", "required_os_reserved_cores(): \"\"\" Returns value of os_reserved_cores from platform_profile used by", "= logging.getLogger(__name__) traced_value = trace_os_reserved_cores() required_value = required_os_reserved_cores() result =", "reversed_binary: if bit == '1': output = output + str(i)", "else: result['criteria'] = 'fail' store_result(logger, result) return result def nova_scheduler_filters_check():", "= re.findall(\":[a-zA-Z0-9-]+\", response) for key in match: response = response.replace(key[1:],", "result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return", "Trace vswitch_pmd_cores from Airship deployment :return: value traced from `other_config:pmd-cpu-mask`", "= settings.getValue('pdf_file')['platform_profiles'] for profile in platform_profiles: if profile['profile_name'] == profile_name:", "logger = logging.getLogger(__name__) traced_value = trace_vswitch_pmd_cores() required_value = required_vswitch_pmd_cores() result", "`someKey=somevalue` :param delimiter: default delimiter is `=` :return: [ key,", "= list(dict.fromkeys(result)) return result def is_ranges_equals(range1, range2): \"\"\" Checks whether", "nova_scheduler_filters_check(): \"\"\" nova_scheduler_filters_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_nova_scheduler_filters()", "Airship deployment :return: value traced from `other_config:pmd-cpu-mask` in openvswitchdb using", "cpu_arr: binary_mask = binary_mask | (1 << int(cpu)) return format(binary_mask,", "required_value = required_vswitch_dpdk_lcores() result 
= {'category': 'compute', 'case_name': 'vswitch_dpdk_lcores_check', 'details':", "configparser.MissingSectionHeaderError): filters = '' filters = filters.split(',') map(str.strip, filters) return", "profile = get_platform_profile(role['platform_profile']) return profile def get_hardware_profile_by_role(role_name): \"\"\" Returns hardware", "settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['os_reserved_cores'] def trace_nova_scheduler_filters(): \"\"\" Trace", "with `profile_name` \"\"\" platform_profiles = settings.getValue('pdf_file')['platform_profiles'] for profile in platform_profiles:", "\"\"\" Returns value of vswitch_dpdk_lcores from platform_profile used by Role", "\"\"\" cpu_arr = cpus.split(\",\") binary_mask = 0 for cpu in", "hex value of cpu-mask \"\"\" cpu_arr = cpus.split(\",\") binary_mask =", "logging.getLogger(__name__) traced_value = trace_nova_scheduler_filters() required_value = required_nova_scheduler_filters() result = {'category':", "return filters def required_nova_scheduler_filters(): \"\"\" Required nova scheduler_filters by the", "are equal e.g.: is_ranges_equals('2-5', '2-4,5') returns true \"\"\" set1 =", "= 'fail' store_result(logger, result) return result def nova_scheduler_filters_check(): \"\"\" nova_scheduler_filters_check", "= settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vnf_cores'] def trace_vswitch_pmd_cores(): \"\"\"", "\"\"\" Trace vswitch_dpdk_lcores from Airship deployment :return: value traced from", "with `profile_name` \"\"\" processor_profiles = settings.getValue('pdf_file')['processor_profiles'] for profile in processor_profiles:", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "= trace_vswitch_dpdk_lcores() non_os_cores = [] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) 
non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores =", "float(cpu_allocation_ratio) def get_role(role_name): \"\"\" Searches and returns role with `role_name`", "Converts CPU mask given in hex to list of cores", "required_value } } if traced_value == required_value: result['criteria'] = 'pass'", "value traced from `vcpu_pin_set` key in nova.conf of actual deployment", "\"\"\" logger = logging.getLogger(__name__) traced_value = trace_vswitch_pmd_cores() required_value = required_vswitch_pmd_cores()", "tools.conf import settings from internal import store_result ########### # Checks", "return filters def trace_cpu_allocation_ratio(): \"\"\" Trace cpu_allocation_ratio from Airship deployment", "get_platform_profile(role['platform_profile']) return profile def get_hardware_profile_by_role(role_name): \"\"\" Returns hardware profile details", "if bit == '1': output = output + str(i) +", "from `cpu_allocation_ratio` key in nova.conf of actual deployment \"\"\" try:", "return role_details def get_platform_profile(profile_name): \"\"\" Searches and returns platform_profile with", "used by Role for worker nodes in PDF :return: vnf_reserverd_core", "list of server hardware used in the role \"\"\" hardware_profile", "= binary[::-1] i = 0 output = \"\" for bit", "\"\"\" logger = logging.getLogger(__name__) traced_value = trace_cpu_allocation_ratio() required_value = required_cpu_allocation_ratio()", "result def vswitch_pmd_cores_check(): \"\"\" vswitch_pmd_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value", "and # limitations under the License. 
\"\"\" Compute Related Checks", "hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] for profile in hardware_profiles: if profile['profile_name'] ==", "= 'fail' store_result(logger, result) return result def cpu_allocation_ratio_check(): \"\"\" cpu_allocation_ratio_check", "vcpu_pin_set def required_reserved_vnf_cores(): \"\"\" Returns value of vnf_cores from platform_profile", "= set(all_cores).difference(set(non_os_cores)) # return as string with comma separated value", "Searches and returns role with `role_name` \"\"\" roles = settings.getValue('pdf_file')['roles']", "\"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') all_cores = get_cores_by_role(worker_role) reserved_vnf_cores = trace_reserved_vnf_cores()", "\"\"\" splits given string into key and value based on", "get_processor_profile(processor_profile) cpus = [] for numa in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return", "'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass'", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "of a role \"\"\" role = get_role(role_name) hardware_profiles = settings.getValue('pdf_file')['hardware_profiles']", "in part: a, b = part.split('-') a, b = int(a),", "+ ',' i = i + 1 return output[:-1] def", "set1 == set2 def are_lists_equal(list1, list2): \"\"\" Checks whether two", "Returns value of vswitch_pmd_cores from platform_profile used by Role for", "'details': {'traced_ratio': traced_value, 'required_ratio': required_value } } if traced_value ==", "key[1:] + '\"') config = json.loads(response) if 'pmd-cpu-mask' in config:", "'' return vcpu_pin_set def required_reserved_vnf_cores(): \"\"\" Returns value of vnf_cores", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. 
# See the License for the", ":return: value traced from `enabled_filters` key in nova.conf of actual", "cpu_arr = cpus.split(\",\") binary_mask = 0 for cpu in cpu_arr:", "(configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set = '' return vcpu_pin_set def required_reserved_vnf_cores(): \"\"\"", "} } if are_lists_equal(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria']", "with comma separated value return ','.join(map(str, list(os_reserved_cores))) def required_os_reserved_cores(): \"\"\"", "isolcpus_value def required_isolated_cores(): \"\"\" Returns value of `isolated_cpus` from platform_profile", "'other_config'] response = kube_exec(ovs_pod, cmd) # convert config str to", "e.g.: convert_range_to_list('3-5') will give [3, 4, 5] \"\"\" # pylint:", "from tools.kube_utils import kube_exec, get_pod_with_labels from tools.conf import settings from", "i + 1 return output[:-1] def comma_list_to_hex(cpus): \"\"\" Converts a", "worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vnf_cores'] def trace_vswitch_pmd_cores():", "from given range as string e.g.: convert_range_to_list('3-5') will give [3,", "cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus def get_nova_conf(): \"\"\" Returns parsed nova.conf \"\"\"", "pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/etc/nova/nova.conf'] response = kube_exec(pod,", "pmd_cores = '' return pmd_cores def required_vswitch_dpdk_lcores(): \"\"\" Returns value", "= get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores(): \"\"\" Trace os_reserved_cores from", "profile_details def get_processor_profile(profile_name): \"\"\" Searches and returns processor_profile with `profile_name`", "logger = logging.getLogger(__name__) traced_value = trace_vswitch_dpdk_lcores() required_value = required_vswitch_dpdk_lcores() 
result", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "\"\"\" try: config = get_nova_conf() filters = config.get('filter_scheduler', 'enabled_filters') except", "separated value return ','.join(map(str, list(os_reserved_cores))) def required_os_reserved_cores(): \"\"\" Returns value", "0 for cpu in cpu_arr: binary_mask = binary_mask | (1", "Required cpu_allocation_ratio by the PDF \"\"\" pdf = settings.getValue('pdf_file') cpu_allocation_ratio", "worker_role = settings.getValue('WORKER_ROLE_NAME') all_cores = get_cores_by_role(worker_role) reserved_vnf_cores = trace_reserved_vnf_cores() vswitch_pmd_cores", "of os_reserved_cores from platform_profile used by Role for worker nodes", "part in x.split(','): if '-' in part: a, b =", "re.findall(\"[a-zA-Z0-9-]+=\", response) for key in match: response = response.replace(key, '\"'", "key_value_str: example string `someKey=somevalue` :param delimiter: default delimiter is `=`", "'fail' store_result(logger, result) return result def cpu_allocation_ratio_check(): \"\"\" cpu_allocation_ratio_check \"\"\"", "required_nova_scheduler_filters() result = {'category': 'compute', 'case_name': 'nova_scheduler_filters_check', 'details': {'traced_filters': traced_value,", "return result def nova_scheduler_filters_check(): \"\"\" nova_scheduler_filters_check \"\"\" logger = logging.getLogger(__name__)", "actual deployment \"\"\" try: config = get_nova_conf() filters = config.get('filter_scheduler',", "get_hardware_profile_by_role(role_name) processor_profile = hardware_profile['profile_info']['processor_profile'] profile = get_processor_profile(processor_profile) cpus = []", "= set(convert_range_to_list(range2)) return set1 == set2 def are_lists_equal(list1, list2): \"\"\"", "= get_platform_profile_by_role(worker_role) return profile['os_reserved_cores'] def trace_nova_scheduler_filters(): \"\"\" Trace scheduler_filters from", "`/proc/cmdline` \"\"\" pod = get_pod_with_labels('application=nova,component=compute') cmd = 
['cat', '/proc/cmdline'] proc_cmd", "'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def", "for worker nodes in PDF :return: isolated_cores value expected by", "Of Delhi. # # Licensed under the Apache License, Version", "convert config str to json str match = re.findall(\"[a-zA-Z0-9-]+=\", response)", "\"\"\" Checks whether two ranges passed as string are equal", "list of cpu cores in corresponding hex value of cpu-mask", "result.append(a) # remove duplicates result = list(dict.fromkeys(result)) return result def", "return result def vswitch_pmd_cores_check(): \"\"\" vswitch_pmd_cores_check \"\"\" logger = logging.getLogger(__name__)", "= required_isolated_cores() result = {'category': 'compute', 'case_name': 'isolated_cores_check', 'details': {'traced_cores':", "in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus def get_nova_conf(): \"\"\" Returns parsed", "list of numbers from given range as string e.g.: convert_range_to_list('3-5')", "cpu_allocation_ratio = '' return float(cpu_allocation_ratio) def required_cpu_allocation_ratio(): \"\"\" Required cpu_allocation_ratio", "def trace_vswitch_pmd_cores(): \"\"\" Trace vswitch_pmd_cores from Airship deployment :return: value", "Role for worker nodes in PDF :return: vswitch_pmd_cores value expected", "required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result)", "traced_value, 'required_filters': required_value } } if are_lists_equal(traced_value, required_value): result['criteria'] =", "set(convert_range_to_list(range2)) return set1 == set2 def are_lists_equal(list1, list2): \"\"\" Checks", "\"\"\" roles = settings.getValue('pdf_file')['roles'] for role in roles: if role['name']", "the PDF \"\"\" pdf = settings.getValue('pdf_file') cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] return", "comma_list_to_hex(cpus): \"\"\" Converts a list of cpu cores in 
corresponding", "for worker nodes in PDF :return: vswitch_pmd_cores value expected by", "of a role \"\"\" role = get_role(role_name) profile = get_platform_profile(role['platform_profile'])", "store_result(logger, result) return result def vswitch_dpdk_lcores_check(): \"\"\" vswitch_dpdk_lcores_check \"\"\" logger", "if role['name'] == role_name: role_details = role return role_details def", "role in roles: if role['name'] == role_name: role_details = role", "use this file except in compliance with the License. #", "def get_nova_conf(): \"\"\" Returns parsed nova.conf \"\"\" pod = get_pod_with_labels('application=nova,component=compute')", "'get', 'Open_vSwitch', '.', 'other_config'] response = kube_exec(ovs_pod, cmd) # convert", "+ vswitch_pmd_cores + vswitch_dpdk_lcores) \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') all_cores =", "= get_platform_profile(role['platform_profile']) return profile def get_hardware_profile_by_role(role_name): \"\"\" Returns hardware profile", "format(binary_mask, '02x') def split_key_value(key_value_str, delimiter='='): \"\"\" splits given string into", "returns role with `role_name` \"\"\" roles = settings.getValue('pdf_file')['roles'] for role", "'os_reserved_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value,", "'\":') match = re.findall(\":[a-zA-Z0-9-]+\", response) for key in match: response", "for numa in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus def get_nova_conf(): \"\"\"", "the PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return", "in compliance with the License. 
# You may obtain a", "return isolcpus_value def required_isolated_cores(): \"\"\" Returns value of `isolated_cpus` from", "software # distributed under the License is distributed on an", "map(str.strip, filters) return filters def trace_cpu_allocation_ratio(): \"\"\" Trace cpu_allocation_ratio from", "logger = logging.getLogger(__name__) traced_value = trace_nova_scheduler_filters() required_value = required_nova_scheduler_filters() result", "Searches and returns processor_profile with `profile_name` \"\"\" processor_profiles = settings.getValue('pdf_file')['processor_profiles']", "= required_cpu_allocation_ratio() result = {'category': 'compute', 'case_name': 'cpu_allocation_ratio_check', 'details': {'traced_ratio':", "config.get('DEFAULT', 'vcpu_pin_set') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set = '' return vcpu_pin_set", "== required_value: result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger,", "as string are equal e.g.: is_ranges_equals('2-5', '2-4,5') returns true \"\"\"", "\"\"\" key, value = key_value_str.split(delimiter) key = key.strip() value =", "'2-4,5') returns true \"\"\" set1 = set(convert_range_to_list(range1)) set2 = set(convert_range_to_list(range2))", "hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores = '' return pmd_cores def required_vswitch_pmd_cores(): \"\"\"", "in match: response = response.replace(key, '\"' + key[:-1] + '\":')", "result def os_reserved_cores_check(): \"\"\" os_reserved_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value", "# limitations under the License. 
\"\"\" Compute Related Checks \"\"\"", "set2 def are_lists_equal(list1, list2): \"\"\" Checks whether two list are", "if '-' in part: a, b = part.split('-') a, b", "\"\"\" Trace cpu_allocation_ratio from Airship deployment :return: value traced from", "= settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['isolated_cpus'] def trace_reserved_vnf_cores(): \"\"\"", "config str to json str match = re.findall(\"[a-zA-Z0-9-]+=\", response) for", "'cpu_allocation_ratio') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio = '' return float(cpu_allocation_ratio) def", "def hex_to_comma_list(hex_mask): \"\"\" Converts CPU mask given in hex to", "vnf_reserved_cores from Airship deployment :return: value traced from `vcpu_pin_set` key", "match: response = response.replace(key[1:], '\"' + key[1:] + '\"') config", "def cpu_allocation_ratio_check(): \"\"\" cpu_allocation_ratio_check \"\"\" logger = logging.getLogger(__name__) traced_value =", "kube_exec, get_pod_with_labels from tools.conf import settings from internal import store_result", "PDF \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores']", "'\"' + key[:-1] + '\":') match = re.findall(\":[a-zA-Z0-9-]+\", response) for", "def trace_isolated_cores(): \"\"\" Trace isolated_cores from Airship deployment :return: value", "value of vnf_cores from platform_profile used by Role for worker", "def get_role(role_name): \"\"\" Searches and returns role with `role_name` \"\"\"", "will give [3, 4, 5] \"\"\" # pylint: disable=C0103 result", "if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail'", "profile['profile_name'] == profile_name: profile_details = profile return profile_details def get_platform_profile_by_role(role_name):", "with the License. 
# You may obtain a copy of", "'compute', 'case_name': 'reserved_vnf_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } }", "break return isolcpus_value def required_isolated_cores(): \"\"\" Returns value of `isolated_cpus`", "','.join(map(str, list(os_reserved_cores))) def required_os_reserved_cores(): \"\"\" Returns value of os_reserved_cores from", "key_value_str.split(delimiter) key = key.strip() value = value.strip() return key, value", "logger = logging.getLogger(__name__) traced_value = trace_reserved_vnf_cores() required_value = required_reserved_vnf_cores() result", "profile return profile_details def get_cores_by_role(role_name): \"\"\" Returns cpu cores list", "settings.getValue('pdf_file') cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio) def get_role(role_name): \"\"\" Searches", "details of a role \"\"\" role = get_role(role_name) hardware_profiles =", "string with comma separated value return ','.join(map(str, list(os_reserved_cores))) def required_os_reserved_cores():", "on delimiter :param key_value_str: example string `someKey=somevalue` :param delimiter: default", "for worker nodes in PDF :return: vnf_reserverd_core value expected by", "cpu_allocation_ratio_check(): \"\"\" cpu_allocation_ratio_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_cpu_allocation_ratio()", "remove duplicates result = list(dict.fromkeys(result)) return result def is_ranges_equals(range1, range2):", "express or implied. # See the License for the specific", "except in compliance with the License. 
# You may obtain", "{'category': 'compute', 'case_name': 'vswitch_pmd_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value }", "store_result(logger, result) return result ############### # helper functions ############### def", "ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch',", "'compute', 'case_name': 'isolated_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } }", "def get_processor_profile(profile_name): \"\"\" Searches and returns processor_profile with `profile_name` \"\"\"", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "return set1 == set2 def are_lists_equal(list1, list2): \"\"\" Checks whether", "store_result ########### # Checks ########### def isolated_cores_check(): \"\"\" isolated_cores_check \"\"\"", "result) return result def cpu_allocation_ratio_check(): \"\"\" cpu_allocation_ratio_check \"\"\" logger =", "bit in reversed_binary: if bit == '1': output = output", "def trace_os_reserved_cores(): \"\"\" Trace os_reserved_cores from Airship deployment os_reserved_cores =", "\"\"\" try: config = get_nova_conf() vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set') except", "traced from `enabled_filters` key in nova.conf of actual deployment \"\"\"", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "json import re import logging from tools.kube_utils import kube_exec, get_pod_with_labels", "'compute', 'case_name': 'os_reserved_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } }", "hardware_profile['profile_info']['processor_profile'] profile = get_processor_profile(processor_profile) cpus = [] for numa in", "role return role_details def get_platform_profile(profile_name): \"\"\" Searches and returns platform_profile", "a list of cpu cores in corresponding hex value of", "int(a), int(b) result.extend(range(a, b + 1)) elif part != '':", "trace_vswitch_pmd_cores() required_value = required_vswitch_pmd_cores() result = {'category': 'compute', 'case_name': 'vswitch_pmd_cores_check',", "key, value] \"\"\" key, value = key_value_str.split(delimiter) key = key.strip()", "\"\"\" Required nova scheduler_filters by the PDF \"\"\" pdf =", "cores list of server hardware used in the role \"\"\"", "used in the role \"\"\" hardware_profile = get_hardware_profile_by_role(role_name) processor_profile =", "Converts a list of cpu cores in corresponding hex value", "get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/proc/cmdline'] proc_cmd = kube_exec(pod, cmd) for", "role['name'] == role_name: role_details = role return role_details def get_platform_profile(profile_name):", "Returns value of `isolated_cpus` from platform_profile used by Role for", "by Role for worker nodes in PDF :return: os_reserved_cores value", "`isolated_cpus` from platform_profile used by Role for worker nodes in", "result ############### # helper functions ############### def trace_isolated_cores(): \"\"\" Trace", "required_value: result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result)", "deployment :return: value traced from `vcpu_pin_set` key in nova.conf of", "= trace_vswitch_pmd_cores() vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() non_os_cores = [] 
non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores))", "\"\"\" hardware_profile = get_hardware_profile_by_role(role_name) processor_profile = hardware_profile['profile_info']['processor_profile'] profile = get_processor_profile(processor_profile)", "if profile['profile_name'] == profile_name: profile_details = profile return profile_details def", "store_result(logger, result) return result def vswitch_pmd_cores_check(): \"\"\" vswitch_pmd_cores_check \"\"\" logger", "config: pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores = '' return pmd_cores", "cmd = ['cat', '/proc/cmdline'] proc_cmd = kube_exec(pod, cmd) for option", "list(dict.fromkeys(result)) return result def is_ranges_equals(range1, range2): \"\"\" Checks whether two", "############### def trace_isolated_cores(): \"\"\" Trace isolated_cores from Airship deployment :return:", "= '' return vcpu_pin_set def required_reserved_vnf_cores(): \"\"\" Returns value of", "trace_vswitch_dpdk_lcores() required_value = required_vswitch_dpdk_lcores() result = {'category': 'compute', 'case_name': 'vswitch_dpdk_lcores_check',", "cpu cores in corresponding hex value of cpu-mask \"\"\" cpu_arr", "- (reserved_vnf_cores + vswitch_pmd_cores + vswitch_dpdk_lcores) \"\"\" worker_role = settings.getValue('WORKER_ROLE_NAME')", "`=` :return: [ key, value] \"\"\" key, value = key_value_str.split(delimiter)", "in PDF :return: os_reserved_cores value expected by the PDF \"\"\"", "all_cores - (reserved_vnf_cores + vswitch_pmd_cores + vswitch_dpdk_lcores) \"\"\" worker_role =", "result def vswitch_dpdk_lcores_check(): \"\"\" vswitch_dpdk_lcores_check \"\"\" logger = logging.getLogger(__name__) traced_value", "traced_value = trace_nova_scheduler_filters() required_value = required_nova_scheduler_filters() result = {'category': 'compute',", "traced_value = trace_os_reserved_cores() required_value = required_os_reserved_cores() result = {'category': 
'compute',", "get_platform_profile_by_role(worker_role) return profile['vnf_cores'] def trace_vswitch_pmd_cores(): \"\"\" Trace vswitch_pmd_cores from Airship", "from Airship deployment :return: value traced from `other_config:dpdk-lcore-mask` in openvswitchdb", "a = int(part) result.append(a) # remove duplicates result = list(dict.fromkeys(result))", "pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores = '' return pmd_cores def", "== profile_name: profile_details = profile return profile_details def get_platform_profile_by_role(role_name): \"\"\"", "kube_exec(pod, cmd) for option in proc_cmd.split(): if 'isolcpus' in option:", "logging.getLogger(__name__) traced_value = trace_reserved_vnf_cores() required_value = required_reserved_vnf_cores() result = {'category':", "else: result['criteria'] = 'fail' store_result(logger, result) return result def vswitch_pmd_cores_check():", "trace_nova_scheduler_filters(): \"\"\" Trace scheduler_filters from Airship deployment :return: value traced", "= binary_mask | (1 << int(cpu)) return format(binary_mask, '02x') def", "in proc_cmd.split(): if 'isolcpus' in option: _, isolcpus_value = split_key_value(option)", "settings.getValue('pdf_file')['processor_profiles'] for profile in processor_profiles: if profile['profile_name'] == profile_name: profile_details", "`profile_name` \"\"\" platform_profiles = settings.getValue('pdf_file')['platform_profiles'] for profile in platform_profiles: if", "a, b = int(a), int(b) result.extend(range(a, b + 1)) elif", "deployment \"\"\" try: config = get_nova_conf() filters = config.get('filter_scheduler', 'enabled_filters')", "result = [] for part in x.split(','): if '-' in", "= config.get('filter_scheduler', 'enabled_filters') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): filters = '' filters", "kube_exec(ovs_pod, cmd) # convert config str to json str match", "[] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) 
non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores = set(all_cores).difference(set(non_os_cores)) # return as", "} if traced_value == required_value: result['criteria'] = 'pass' else: result['criteria']", "'fail' store_result(logger, result) return result ############### # helper functions ###############", "`profile_name` \"\"\" processor_profiles = settings.getValue('pdf_file')['processor_profiles'] for profile in processor_profiles: if", "set(list2) return set1 == set2 def hex_to_comma_list(hex_mask): \"\"\" Converts CPU", "= hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores = '' return pmd_cores def required_vswitch_dpdk_lcores():", "profile_details = profile return profile_details def get_processor_profile(profile_name): \"\"\" Searches and", "role \"\"\" hardware_profile = get_hardware_profile_by_role(role_name) processor_profile = hardware_profile['profile_info']['processor_profile'] profile =", "required_isolated_cores(): \"\"\" Returns value of `isolated_cpus` from platform_profile used by", "return vcpu_pin_set def required_reserved_vnf_cores(): \"\"\" Returns value of vnf_cores from", "traced_value == required_value: result['criteria'] = 'pass' else: result['criteria'] = 'fail'", "\"\"\" Converts CPU mask given in hex to list of", "profile = get_platform_profile_by_role(worker_role) return profile['os_reserved_cores'] def trace_nova_scheduler_filters(): \"\"\" Trace scheduler_filters", "\"\"\" role = get_role(role_name) profile = get_platform_profile(role['platform_profile']) return profile def", "result = list(dict.fromkeys(result)) return result def is_ranges_equals(range1, range2): \"\"\" Checks", "def are_lists_equal(list1, list2): \"\"\" Checks whether two list are identicals", "from Airship deployment :return: value traced from `enabled_filters` key in", "in openvswitchdb using ovs-vsctl \"\"\" ovs_pod = 
get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd =", "worker nodes in PDF :return: vswitch_pmd_cores value expected by the", "def required_vswitch_pmd_cores(): \"\"\" Returns value of vswitch_pmd_cores from platform_profile used", "os_reserved_cores = set(all_cores).difference(set(non_os_cores)) # return as string with comma separated", "response = response.replace(key, '\"' + key[:-1] + '\":') match =", "role = get_role(role_name) profile = get_platform_profile(role['platform_profile']) return profile def get_hardware_profile_by_role(role_name):", "get_platform_profile_by_role(worker_role) return profile['os_reserved_cores'] def trace_nova_scheduler_filters(): \"\"\" Trace scheduler_filters from Airship", "\"\"\" Searches and returns platform_profile with `profile_name` \"\"\" platform_profiles =", "return profile_details def get_cores_by_role(role_name): \"\"\" Returns cpu cores list of", "+ key[:-1] + '\":') match = re.findall(\":[a-zA-Z0-9-]+\", response) for key", "= part.split('-') a, b = int(a), int(b) result.extend(range(a, b +", "vswitch_pmd_cores from Airship deployment :return: value traced from `other_config:pmd-cpu-mask` in", "internal import store_result ########### # Checks ########### def isolated_cores_check(): \"\"\"", "return result def os_reserved_cores_check(): \"\"\" os_reserved_cores_check \"\"\" logger = logging.getLogger(__name__)", "passed as string are equal e.g.: is_ranges_equals('2-5', '2-4,5') returns true", "functions ############### def trace_isolated_cores(): \"\"\" Trace isolated_cores from Airship deployment", "= {'category': 'compute', 'case_name': 'os_reserved_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value", "value based on delimiter :param key_value_str: example string `someKey=somevalue` :param", "trace_cpu_allocation_ratio() required_value = required_cpu_allocation_ratio() result = {'category': 'compute', 'case_name': 'cpu_allocation_ratio_check',", 
"is_ranges_equals('2-5', '2-4,5') returns true \"\"\" set1 = set(convert_range_to_list(range1)) set2 =", "key[:-1] + '\":') match = re.findall(\":[a-zA-Z0-9-]+\", response) for key in", "result = {'category': 'compute', 'case_name': 'cpu_allocation_ratio_check', 'details': {'traced_ratio': traced_value, 'required_ratio':", "= trace_cpu_allocation_ratio() required_value = required_cpu_allocation_ratio() result = {'category': 'compute', 'case_name':", "{'traced_ratio': traced_value, 'required_ratio': required_value } } if traced_value == required_value:", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "vnf_cores from platform_profile used by Role for worker nodes in", "= settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores(): \"\"\"", "Required nova scheduler_filters by the PDF \"\"\" pdf = settings.getValue('pdf_file')", "= get_nova_conf() vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set", "# pylint: disable=C0103 result = [] for part in x.split(','):", "(1 << int(cpu)) return format(binary_mask, '02x') def split_key_value(key_value_str, delimiter='='): \"\"\"", "kube_exec(pod, cmd) config = configparser.ConfigParser() config.read_string(response) return config ### cpu", "= trace_isolated_cores() required_value = required_isolated_cores() result = {'category': 'compute', 'case_name':", "in match: response = response.replace(key[1:], '\"' + key[1:] + '\"')", "disable=C0103 result = [] for part in x.split(','): if '-'", "worker nodes in PDF :return: os_reserved_cores value expected by the", "deployment :return: value traced from `enabled_filters` key in nova.conf of", "Version 2.0 (the \"License\"); # you may not use this", "worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return 
profile['isolated_cpus'] def trace_reserved_vnf_cores():", "= json.loads(response) if 'pmd-cpu-mask' in config: pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) else:", "'vcpu_pin_set') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set = '' return vcpu_pin_set def", "'fail' store_result(logger, result) return result def vswitch_dpdk_lcores_check(): \"\"\" vswitch_dpdk_lcores_check \"\"\"", "{'category': 'compute', 'case_name': 'reserved_vnf_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value }", "trace_cpu_allocation_ratio(): \"\"\" Trace cpu_allocation_ratio from Airship deployment :return: value traced", "int(cpu)) return format(binary_mask, '02x') def split_key_value(key_value_str, delimiter='='): \"\"\" splits given", "logger = logging.getLogger(__name__) traced_value = trace_cpu_allocation_ratio() required_value = required_cpu_allocation_ratio() result", "'' return float(cpu_allocation_ratio) def required_cpu_allocation_ratio(): \"\"\" Required cpu_allocation_ratio by the", "option: _, isolcpus_value = split_key_value(option) break return isolcpus_value def required_isolated_cores():", "_, isolcpus_value = split_key_value(option) break return isolcpus_value def required_isolated_cores(): \"\"\"", "response.replace(key[1:], '\"' + key[1:] + '\"') config = json.loads(response) if", "delimiter is `=` :return: [ key, value] \"\"\" key, value", "by applicable law or agreed to in writing, software #", "\"\"\" Returns platform profile details of a role \"\"\" role", "in x.split(','): if '-' in part: a, b = part.split('-')", "required_cpu_allocation_ratio() result = {'category': 'compute', 'case_name': 'cpu_allocation_ratio_check', 'details': {'traced_ratio': traced_value,", "value of cpu-mask \"\"\" cpu_arr = cpus.split(\",\") binary_mask = 0", "equal e.g.: is_ranges_equals('2-5', '2-4,5') returns true \"\"\" set1 = set(convert_range_to_list(range1))", "trace_vswitch_pmd_cores(): \"\"\" 
Trace vswitch_pmd_cores from Airship deployment :return: value traced", "def isolated_cores_check(): \"\"\" isolated_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value =", "'case_name': 'nova_scheduler_filters_check', 'details': {'traced_filters': traced_value, 'required_filters': required_value } } if", "under the License. \"\"\" Compute Related Checks \"\"\" import configparser", "Trace scheduler_filters from Airship deployment :return: value traced from `enabled_filters`", "# helper functions ############### def trace_isolated_cores(): \"\"\" Trace isolated_cores from", "deployment :return: value traced from `isolcpus` key in `/proc/cmdline` \"\"\"", ":return: value traced from `vcpu_pin_set` key in nova.conf of actual", "of `isolated_cpus` from platform_profile used by Role for worker nodes", "Airship deployment :return: value traced from `isolcpus` key in `/proc/cmdline`", "\"\"\" logger = logging.getLogger(__name__) traced_value = trace_reserved_vnf_cores() required_value = required_reserved_vnf_cores()", ":return: vswitch_dpdk_lcores value expected by the PDF \"\"\" worker_role =", "in the role \"\"\" hardware_profile = get_hardware_profile_by_role(role_name) processor_profile = hardware_profile['profile_info']['processor_profile']", "profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores(): \"\"\" Trace os_reserved_cores from Airship deployment os_reserved_cores", "result) return result def vswitch_dpdk_lcores_check(): \"\"\" vswitch_dpdk_lcores_check \"\"\" logger =", "value return ','.join(map(str, list(os_reserved_cores))) def required_os_reserved_cores(): \"\"\" Returns value of", "a role \"\"\" role = get_role(role_name) profile = get_platform_profile(role['platform_profile']) return", "the PDF \"\"\" pdf = settings.getValue('pdf_file') filters = pdf['vim_functional']['scheduler_filters'] filters", "applicable law or agreed to in writing, software # distributed", "from Airship deployment :return: value traced from 
`cpu_allocation_ratio` key in", "+ key[1:] + '\"') config = json.loads(response) if 'pmd-cpu-mask' in", "return set1 == set2 def hex_to_comma_list(hex_mask): \"\"\" Converts CPU mask", "= get_platform_profile_by_role(worker_role) return profile['isolated_cpus'] def trace_reserved_vnf_cores(): \"\"\" Trace vnf_reserved_cores from", "profile_details def get_cores_by_role(role_name): \"\"\" Returns cpu cores list of server", "+ '\"') config = json.loads(response) if 'dpdk-lcore-mask' in config: pmd_cores", "`vcpu_pin_set` key in nova.conf of actual deployment \"\"\" try: config", "(configparser.NoOptionError, configparser.MissingSectionHeaderError): filters = '' filters = filters.split(',') map(str.strip, filters)", "get_nova_conf() filters = config.get('filter_scheduler', 'enabled_filters') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): filters =", "value traced from `enabled_filters` key in nova.conf of actual deployment", "os_reserved_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_os_reserved_cores() required_value =", "set(list1) set2 = set(list2) return set1 == set2 def hex_to_comma_list(hex_mask):", "the role \"\"\" hardware_profile = get_hardware_profile_by_role(role_name) processor_profile = hardware_profile['profile_info']['processor_profile'] profile", "for part in x.split(','): if '-' in part: a, b", "json str match = re.findall(\"[a-zA-Z0-9-]+=\", response) for key in match:", "# You may obtain a copy of the License at", "Trace os_reserved_cores from Airship deployment os_reserved_cores = all_cores - (reserved_vnf_cores", "= pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio) def get_role(role_name): \"\"\" Searches and returns", "non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores = set(all_cores).difference(set(non_os_cores)) # return as string with comma", "return result def cpu_allocation_ratio_check(): \"\"\" 
cpu_allocation_ratio_check \"\"\" logger = logging.getLogger(__name__)", "numbers from given range as string e.g.: convert_range_to_list('3-5') will give", "profile_details def get_platform_profile_by_role(role_name): \"\"\" Returns platform profile details of a", "set(all_cores).difference(set(non_os_cores)) # return as string with comma separated value return", "} } if traced_value == required_value: result['criteria'] = 'pass' else:", "cpu_allocation_ratio from Airship deployment :return: value traced from `cpu_allocation_ratio` key", "\"\"\" Returns hardware profile details of a role \"\"\" role", "helper functions ############### def trace_isolated_cores(): \"\"\" Trace isolated_cores from Airship", "return output[:-1] def comma_list_to_hex(cpus): \"\"\" Converts a list of cpu", "profile details of a role \"\"\" role = get_role(role_name) hardware_profiles", "settings.getValue('pdf_file')['roles'] for role in roles: if role['name'] == role_name: role_details", "= {'category': 'compute', 'case_name': 'isolated_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value", "{'category': 'compute', 'case_name': 'cpu_allocation_ratio_check', 'details': {'traced_ratio': traced_value, 'required_ratio': required_value }", "reserved_vnf_cores_check(): \"\"\" reserved_vnf_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_reserved_vnf_cores()", "b = int(a), int(b) result.extend(range(a, b + 1)) elif part", "profile['isolated_cpus'] def trace_reserved_vnf_cores(): \"\"\" Trace vnf_reserved_cores from Airship deployment :return:", "response) for key in match: response = response.replace(key[1:], '\"' +", "'02x') def split_key_value(key_value_str, delimiter='='): \"\"\" splits given string into key", "= '' return float(cpu_allocation_ratio) def required_cpu_allocation_ratio(): \"\"\" Required cpu_allocation_ratio by", "def get_hardware_profile_by_role(role_name): \"\"\" Returns hardware profile details of a role", 
"'isolcpus' in option: _, isolcpus_value = split_key_value(option) break return isolcpus_value", "cores related helper function def convert_range_to_list(x): \"\"\" Returns list of", "= profile return profile_details def get_platform_profile_by_role(role_name): \"\"\" Returns platform profile", "vswitch_dpdk_lcores_check \"\"\" logger = logging.getLogger(__name__) traced_value = trace_vswitch_dpdk_lcores() required_value =", "given in hex to list of cores \"\"\" binary =", "given range as string e.g.: convert_range_to_list('3-5') will give [3, 4,", "'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value):", "= kube_exec(pod, cmd) config = configparser.ConfigParser() config.read_string(response) return config ###", "are identicals \"\"\" set1 = set(list1) set2 = set(list2) return", "5] \"\"\" # pylint: disable=C0103 result = [] for part", "\"\"\" logger = logging.getLogger(__name__) traced_value = trace_vswitch_dpdk_lcores() required_value = required_vswitch_dpdk_lcores()", "\"License\"); # you may not use this file except in", "role with `role_name` \"\"\" roles = settings.getValue('pdf_file')['roles'] for role in", "convert_range_to_list('3-5') will give [3, 4, 5] \"\"\" # pylint: disable=C0103", "def vswitch_pmd_cores_check(): \"\"\" vswitch_pmd_cores_check \"\"\" logger = logging.getLogger(__name__) traced_value =", "the License. 
\"\"\" Compute Related Checks \"\"\" import configparser import", "as string e.g.: convert_range_to_list('3-5') will give [3, 4, 5] \"\"\"", "else: result['criteria'] = 'fail' store_result(logger, result) return result def reserved_vnf_cores_check():", "= settings.getValue('pdf_file')['processor_profiles'] for profile in processor_profiles: if profile['profile_name'] == profile_name:", ":param delimiter: default delimiter is `=` :return: [ key, value]", "get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores(): \"\"\" Trace os_reserved_cores from Airship", "return as string with comma separated value return ','.join(map(str, list(os_reserved_cores)))", "key in nova.conf of actual deployment \"\"\" try: config =", "'enabled_filters') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): filters = '' filters = filters.split(',')", "elif part != '': a = int(part) result.append(a) # remove", "hardware_profiles: if profile['profile_name'] == role['hardware_profile']: profile_details = profile return profile_details", "match = re.findall(\"[a-zA-Z0-9-]+=\", response) for key in match: response =", "= {'category': 'compute', 'case_name': 'vswitch_dpdk_lcores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value", "= get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores(): \"\"\" Trace vswitch_dpdk_lcores from", "cpus.split(\",\") binary_mask = 0 for cpu in cpu_arr: binary_mask =" ]
[ "either new implemented modules or alternate implementations of already modules.", "intended to have a second implementation beside the main implementation", "new implemented modules or alternate implementations of already modules. This", "implementations of already modules. This directory is intended to have", "the main implementation to have a discussion which implementation to", "\"\"\" Here you find either new implemented modules or alternate", "have a discussion which implementation to favor on the long", "a second implementation beside the main implementation to have a", "modules or alternate implementations of already modules. This directory is", "beside the main implementation to have a discussion which implementation", "is intended to have a second implementation beside the main", "to have a second implementation beside the main implementation to", "have a second implementation beside the main implementation to have", "already modules. This directory is intended to have a second", "implementation to have a discussion which implementation to favor on", "implemented modules or alternate implementations of already modules. This directory", "directory is intended to have a second implementation beside the", "implementation beside the main implementation to have a discussion which", "or alternate implementations of already modules. This directory is intended", "find either new implemented modules or alternate implementations of already", "Here you find either new implemented modules or alternate implementations", "This directory is intended to have a second implementation beside", "main implementation to have a discussion which implementation to favor", "alternate implementations of already modules. This directory is intended to", "a discussion which implementation to favor on the long run.", "of already modules. 
This directory is intended to have a", "to have a discussion which implementation to favor on the", "you find either new implemented modules or alternate implementations of", "discussion which implementation to favor on the long run. \"\"\"", "modules. This directory is intended to have a second implementation", "second implementation beside the main implementation to have a discussion" ]
[ "list\") def test_get_location_Image_Data(self): rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] ,", "+= chr(b1) header += chr(b2) l = struct.pack(\">Q\", payload_len) header", "= \",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\" sqlstring = sqlstring1 + sqlstring2+ sqlstring3 #print(sqlstring) es.dbInsert(sqlstring)", "else: b2 |= 127 header += chr(b1) header += chr(b2)", "import MySQLdb import json import EventService import flaskr import tempfile", "''' Created on Mar 6, 2014 @author: tharanga ''' import", "Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) #", "is to use sendAll, use \\n at the end data", "IS PROVIDED TO CREATE THE NECESSARY DATABASES AND TABLES ##ASSISCIATED", "jsondata5 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.35\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4587125, \"lat\":65.0600797, \"alt\":-1000,", "= es.getClosestImages( 65.0601787, 25.4587107, radius ) self.assertEqual(len(photoList2), 2, \"Length of", "\"vheight\":800} jsondata5 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.35\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4587125, \"lat\":65.0600797,", "The remote host PORT = 17322 class RestServerTestCase(unittest.TestCase): def setUp(self):", "= \"INSERT INTO Imagedata values (\\'\"+filename+\"\\',GeomFromText ('POINT(\"+ 
str(jsondata[\"position\"][\"lat\"])+\" \"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"]) sqlstring2", "=0x80 | 0x1 & 0x0f b2 = 0 header=\"\" payload_len", "Message rejected\") def test_malformed_Message(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade:", "self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp() EventService.app.config['TESTING'] = True self.app = EventService.app.test_client()", "2D3DCapture Server.' in rv.data def test_post_image(self): rv = self.app.post('/postImage') assert", "suite() runner = unittest.TextTestRunner(verbosity=3) runner.run(suite) # if __name__ == \"__main__\":", "= \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin:", "rv =self.app.get('/getAllImageData') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getImageData returns a", "\"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4584104, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0,", "unittest.TextTestRunner(verbosity=3) runner.run(suite) # if __name__ == \"__main__\": # #import sys;sys.argv", "wsresponse self.testsocket.send(encodeMessage(\"1<---->Test Message\"))#This line seems to get stuck at times.", "\"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} alt = str(jsondata[\"position\"][\"alt\"]); if alt==\"None\":", "self.assertEqual(data, '\\'CONNECTION_REJECTED\\'', \"Invalid Message rejected\") def test_valid_WS_Request(self): message = \"GET", "Mar 6, 2014 @author: tharanga ''' import unittest from time", "# print wsresponse self.testsocket.send(encodeMessage(\"Test Message\"))#This line seems to get stuck", 
"opcode\\'\", \"In valid Message rejected\") def test_invalid_Messge(self): message = \"GET", "websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024))", "\"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata2 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.32\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\",", "\"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata5 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.35\",", "UNIT TESTS IT IS EXPECTED HAVE THE DATABASE ##CREATED. DATABASE", "DATABASE SCRIPT IS PROVIDED TO CREATE THE NECESSARY DATABASES AND", "NOT PROVIDED. class TestDatabase(unittest.TestCase): def setUp(self): self.connection = es.dbConnect() def", "radius ) self.assertEqual(len(photoList), 4, \"Length of the list should be", "4, \"Length of the list should be equal of the", "from Imagedata where time=\\'2014.3.4_14.40.31\\'' result = es.dbRead(sqlreadsting) self.assertIsNotNone(result, \"Inserted data", "\"time\":\"2014.3.4_14.40.32\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4582115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0,", "equal of the first test\") for row in photoList: assert", "in result: self.assertEqual(row[0], \"image_2014.3.4_14.40.30.png\", \"Image name is correctly set and", "photoList = es.getClosestImages( 65.0601787, 25.4583107, radius ) self.assertEqual(len(photoList), 4, \"Length", ", \"getImageData returns a non None list\") def test_get_location_Image_Data(self): rv", "rv.data def test_post_binary_image(self): rv =self.app.post('/postBinaryImage') assert 'READY' or '415 Unsupported", "es.saveData(jsondata6) radius = 
0.0001 photoList = es.getClosestImages( 65.0601787, 25.4583107, radius", "payload_len) message= header +message elif (payload_len < ((2 ** 16)", "= jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"] sqlstring1 = \"INSERT INTO Imagedata values (\\'\"+filename+\"\\',GeomFromText ('POINT(\"+", "testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read')) testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval'))", "\"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} alt = str(jsondata[\"position\"][\"alt\"]); if", "IS EXPECTED HAVE THE DATABASE ##CREATED. 
DATABASE SCRIPT IS PROVIDED", "es.dbConnect() def tearDown(self): self.connection.close() def test_data_insert_data_Read(self): self.assertIsInstance(self.connection, MySQLdb.connection, \"Database connection", "THE NECESSARY DATABASES AND TABLES ##ASSISCIATED DATA IS NOT PROVIDED.", "opcode\\'\", \"In valid Message rejected\") def test_malformed_Message(self): message = \"GET", "testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read')) testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket'))", "EventService import WebSocketServer as ws from EventService import EventManager as", "websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024))", ": header = struct.pack('>BB', b1, payload_len) message= header +message elif", "for location based image data\") def test_closest_Image_retrieval(self): jsondata1 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.31\",", "= repr(self.testsocket.recv(1024)) # print data self.assertEqual(data, \"\\'json is received\\'\", \"json", "expected opcode\\'\", \"In valid Message rejected\") def test_invalid_Messge(self): message =", "Message rejected\") def test_invalid_Messge(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade:", "not null\") for row in result: self.assertEqual(row[0], \"image_2014.3.4_14.40.30.png\", \"Image name", "MESSAGE\\'\", \"Messages with out a type 
is rejected\") def test_wellformed_Message_for_Text(self):", "import socket from base64 import b64encode import struct import MySQLdb", "flaskr.init_db() #self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1') def test_rootpath(self): rv = self.app.get('/') assert", "testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return testsuit suite = suite() runner = unittest.TextTestRunner(verbosity=3)", "self.assertEqual(data, \"\\'json is received\\'\", \"json Messages is identified and accepted\")", "= repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage(\"2<---->Test Message\"))#This line seems to get stuck at", "header +message elif (payload_len < ((2 ** 16) - 1)):", "chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse", "Imagedata where time=\\'2014.3.4_14.40.31\\'' result = es.dbRead(sqlreadsting) self.assertIsNotNone(result, \"Inserted data is", "<KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" # message = \"Test message\" self.testsocket.sendall(message)", "DATABASE ##CREATED. DATABASE SCRIPT IS PROVIDED TO CREATE THE NECESSARY", "Service for 2D3DCapture Server.' 
in rv.data def test_post_image(self): rv =", "import unittest from time import sleep import EventService as es", "import sleep import EventService as es from EventService import WebSocketServer", "self.app.WebSocketServer('',wsport,'127.0.0.1') def test_rootpath(self): rv = self.app.get('/') assert 'This is a", "value\") self.assertEqual(self.wsServer.PORT, 12345, \"Server port is set correctly\") self.assertEqual(self.wsServer.LOCALHOST, \"127.0.0.1\",", "data self.assertEqual(data, \"\\'Text received\\'\", \"Text Messages is identified and accepted\")", "\"vheight\":800} jsondata6 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.36\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4588125, \"lat\":65.0600797,", "message= \"Test Message\" self.testsocket.send(message) data = repr(self.testsocket.recv(1024)) #print 'Response to", "= tempfile.mkstemp() EventService.app.config['TESTING'] = True self.app = EventService.app.test_client() flaskr.init_db() #self.socketServer", "\"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4584104, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0,", "> height : screenorientation= 1.00#landscape else : screenorientation= 0.00#potrait filename", "len(message) if payload_len < 126 : header = struct.pack('>BB', b1,", "= struct.pack(\">H\", payload_len) header += l message = header +message", "WebSocketServer as ws from EventService import EventManager as em import", "\"vwidth\":480, \"vheight\":800} jsondata5 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.35\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4587125,", "es.getClosestImages( 65.0601787, 25.4587107, radius ) self.assertEqual(len(photoList2), 2, \"Length of the", "\"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, 
\"vwidth\":480, \"vheight\":800} alt = str(jsondata[\"position\"][\"alt\"]);", "message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version:", "of the list should be equal of the first test\")", "repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage(\"2<---->Test Message\"))#This line seems to get stuck at times.", "payload_len) header += l message = header +message return message", "<KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print", "or '415 Unsupported Media Type' in rv.data def test_get_All_Image_Data(self): rv", "Response is not Empty\") self.testsocket.sendall((\"Test Message\")) data = repr(self.testsocket.recv(1024)) #print", "assert 'image_2014.3.4_14.40.34.png' or 'image_2014.3.4_14.40.35.png' in row[0] def suite(): testsuit =unittest.TestSuite()", "\"vwidth\":480, \"vheight\":800} alt = str(jsondata[\"position\"][\"alt\"]); if alt==\"None\": alt = '0'", "x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage(\"2<---->Test Message\"))#This", "\"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4588125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0,", "use sendAll, use \\n at the end data = repr(self.testsocket.recv(1024))", "header += chr(b2) l = struct.pack(\">H\", payload_len) header += l", "def test_invalid_Messge(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key:", "ws('',12345,'127.0.0.1') self.wsServer.setRunning(True); 
sleep(1) self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)", "repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In valid Message rejected\") def", "\"time\":\"2014.3.4_14.40.30\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0,", "\"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata3", "\"Server port is set correctly\") self.assertEqual(self.wsServer.LOCALHOST, \"127.0.0.1\", \"Localhost set to", "None list\") def test_get_location_Image_Data(self): rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList']", "\"Database connection accurately set\") jsondata ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.30\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\",", "wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"Test Message\"))#This line seems", "CREATE THE NECESSARY DATABASES AND TABLES ##ASSISCIATED DATA IS NOT", "b1, payload_len) message= header +message elif (payload_len < ((2 **", "\"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata6 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.36\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\",", "sqlstring = sqlstring1 + sqlstring2+ sqlstring3 #print(sqlstring) es.dbInsert(sqlstring) sqlreadsting =", "#print 'Response to valid ws request %s'%wsresponse self.assertNotEqual(wsresponse, '\\'CONNECTION_REJECTED\\'', \"Connection", "encoded Request %s'%(data) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In 
valid Message", "in rv.data def test_post_image(self): rv = self.app.post('/postImage') assert 'READY' in", "# if __name__ == \"__main__\": # #import sys;sys.argv = ['',", "\"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"},", "image data\") def test_closest_Image_retrieval(self): jsondata1 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.31\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\",", "if alt==\"None\": alt = '0' heading = '0' speed =", "\"Localhost set to 127.0.0.1\") def test_invalid_Request(self): message= \"Test Message\" self.testsocket.send(message)", "row[0] def suite(): testsuit =unittest.TestSuite() testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message'))", "imagename, Browser,devicetype,X(location) as latitude, Y(location) as longitude from Imagedata where", "def test_post_binary_image(self): rv =self.app.post('/postBinaryImage') assert 'READY' or '415 Unsupported Media", "|= 126 header += chr(b1) header += chr(b2) l =", "\"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4586115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0,", "/mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message)", "1.00#landscape else : screenorientation= 0.00#potrait filename = jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"] sqlstring1 =", "port)) def tearDown(self): self.wsServer.closeConnection(); self.testsocket.close() sleep(1) def 
test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER, '',", "row in result: self.assertEqual(row[0], \"image_2014.3.4_14.40.30.png\", \"Image name is correctly set", "machine name port = 12345 self.testsocket.connect((host, port)) def tearDown(self): self.wsServer.closeConnection();", "@author: tharanga ''' import unittest from time import sleep import", "test for location based image data\") def test_closest_Image_retrieval(self): jsondata1 ={\"type\":\"image\",", "\"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4582115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0,", "def encodeMessage( message): message = b64encode(message) b1 =0x80 | 0x1", "'localhost' # Get local machine name port = 12345 self.testsocket.connect((host,", "from base64 import b64encode import struct import MySQLdb import json", "saved\") self.assertEqual(row[2], 25.4583105, \"Longitude are saved\") HOST = '127.0.0.1' #", "struct.pack(\">H\", payload_len) header += l message = header +message else:", "0 header=\"\" payload_len = len(message) if payload_len < 126 :", "with out a type is rejected\") def test_wellformed_Message_for_Text(self): message =", "wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"1<---->Test Message\"))#This line seems", "end data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'MISFORMATED MESSAGE\\'\", \"Messages with out", "Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) #", "struct.pack('>BB', b1, payload_len) message= header +message elif (payload_len < ((2", "wsresponse = repr(self.testsocket.recv(1024)) sleep(1) self.testsocket.sendall(\"Test Message\") data = repr(self.testsocket.recv(1024)) self.assertEqual(data,", 
"seems to get stuck at times. Solution is to use", "Solution is to use sendAll, use \\n at the end", "rv.data def test_clossing_websocket(self): rv =self.app.post('/closewebsocketserver') assert 'CLOSED' or 'ALREADY_CLOSSED' in", "stuck at times. Solution is to use sendAll, use \\n", "IT IS EXPECTED HAVE THE DATABASE ##CREATED. DATABASE SCRIPT IS", ": screenorientation= 1.00#landscape else : screenorientation= 0.00#potrait filename = jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"]", "\"Test Message\" self.testsocket.send(message) data = repr(self.testsocket.recv(1024)) #print 'Response to invalid", "\"vwidth\":480, \"vheight\":800} es.saveData(jsondata1) es.saveData(jsondata2) es.saveData(jsondata3) es.saveData(jsondata4) es.saveData(jsondata5) es.saveData(jsondata6) radius =", "self.assertEqual(len(photoList2), 2, \"Length of the list should be equal of", "< ((2 ** 16) - 1)): b2 |= 126 header", "saved\") self.assertEqual(row[1], 65.0600797, \"Latitudes are saved\") self.assertEqual(row[2], 25.4583105, \"Longitude are", "be equal of the second test\") for row in photoList2:", "in rv.data def test_post_binary_image(self): rv =self.app.post('/postBinaryImage') assert 'READY' or '415", "elif (payload_len < ((2 ** 16) - 1)): b2 |=", "** 16) - 1)): b2 |= 126 header += chr(b1)", "= \"Test message\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) #print 'Response to", "HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" # message", "+message else: b2 |= 127 header += chr(b1) header +=", "\"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} alt = str(jsondata[\"position\"][\"alt\"]); if alt==\"None\": alt", "second test\") for row in photoList2: assert 'image_2014.3.4_14.40.34.png' or 'image_2014.3.4_14.40.35.png'", "=unittest.TestSuite() 
testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read')) testsuit.addTest(RestServerTestCase('test_rootpath'))", "= repr(self.testsocket.recv(1024)) #print 'Response to invalid message<TestMessage> %s'%(data) self.assertEqual(data, '\\'CONNECTION_REJECTED\\'',", "test_closest_Image_retrieval(self): jsondata1 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.31\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797,", "rejected\") def test_wellformed_Message_for_Text(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection:", "={\"type\":\"image\", \"time\":\"2014.3.4_14.40.31\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125},", "sqlstring2 =\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"]) sqlstring3 = \",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\" sqlstring = sqlstring1 + sqlstring2+", "rejected\") def test_invalid_Messge(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: 
websocket\\nConnection:", "Message\")) data = repr(self.testsocket.recv(1024)) #print 'Response to un encoded Request", "test_post_binary_image(self): rv =self.app.post('/postBinaryImage') assert 'READY' or '415 Unsupported Media Type'", "= 17322 class RestServerTestCase(unittest.TestCase): def setUp(self): self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()", "received\\'\", \"json Messages is identified and accepted\") ##TO RUN THE", "rv = self.app.get('/') assert 'This is a REST Service for", "\"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} alt = str(jsondata[\"position\"][\"alt\"]); if alt==\"None\": alt =", "%s'%(data) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In valid Message rejected\") def", "to 127.0.0.1\") def test_invalid_Request(self): message= \"Test Message\" self.testsocket.send(message) data =", "is a REST Service for 2D3DCapture Server.' in rv.data def", "THE FOLLOWING UNIT TESTS IT IS EXPECTED HAVE THE DATABASE", "= self.app.post('/postImage') assert 'READY' in rv.data def test_clossing_websocket(self): rv =self.app.post('/closewebsocketserver')", "1) # Create a socket object host = 'localhost' #", "out a type is rejected\") def test_wellformed_Message_for_Text(self): message = \"GET", "127.0.0.1\") def test_invalid_Request(self): message= \"Test Message\" self.testsocket.send(message) data = repr(self.testsocket.recv(1024))", "= 'localhost' # Get local machine name port = 12345", "def test_rootpath(self): rv = self.app.get('/') assert 'This is a REST", "feature test for location based image data\") def test_closest_Image_retrieval(self): jsondata1", "testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read')) 
testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image'))", "\"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata2 ={\"type\":\"image\",", "testsuit.addTest(TestDatabase('test_data_insert_data_Read')) testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return testsuit", "class TestWebSockets(unittest.TestCase): def setUp(self): self.wsServer = ws('',12345,'127.0.0.1') self.wsServer.setRunning(True); sleep(1) self.testsocket", "use \\n at the end data = repr(self.testsocket.recv(1024)) # print", "Type' in rv.data def test_get_All_Image_Data(self): rv =self.app.get('/getAllImageData') jsonmsg = json.loads(rv.data)", "'image_2014.3.4_14.40.35.png' in row[0] def suite(): testsuit =unittest.TestSuite() testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge'))", "chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage(\"2<---->Test Message\"))#This line", "\"position\":{\"lon\":25.4582115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0,", "'ALREADY_CLOSSED' in 
rv.data def test_start_websocket(self): rv =self.app.get('/startwebsocketserver') # print rv.data", "result: self.assertEqual(row[0], \"image_2014.3.4_14.40.30.png\", \"Image name is correctly set and saved\")", "- 1)): b2 |= 126 header += chr(b1) header +=", "set correctly\") self.assertEqual(self.wsServer.LOCALHOST, \"127.0.0.1\", \"Localhost set to 127.0.0.1\") def test_invalid_Request(self):", "def setUp(self): self.wsServer = ws('',12345,'127.0.0.1') self.wsServer.setRunning(True); sleep(1) self.testsocket = socket.socket(socket.AF_INET,", "\"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata4 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.34\",", "header = struct.pack('>BB', b1, payload_len) message= header +message elif (payload_len", "=self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getLocationImageData returns a non", "'\\'CONNECTION_REJECTED\\'', \"Connection is not rejected\") self.assertIsNotNone(wsresponse, \"Connection Response is not", "values (\\'\"+filename+\"\\',GeomFromText ('POINT(\"+ str(jsondata[\"position\"][\"lat\"])+\" \"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"]) sqlstring2 =\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"]) sqlstring3 = \",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\"", 
"\",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\" sqlstring = sqlstring1 + sqlstring2+ sqlstring3 #print(sqlstring) es.dbInsert(sqlstring) sqlreadsting", "\"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0,", "es.dbInsert(sqlstring) sqlreadsting = 'select imagename, Browser,devicetype,X(location) as latitude, Y(location) as", "correctly set and saved\") self.assertEqual(row[1], 65.0600797, \"Latitudes are saved\") self.assertEqual(row[2],", "rv =self.app.post('/closewebsocketserver') assert 'CLOSED' or 'ALREADY_CLOSSED' in rv.data def test_start_websocket(self):", "host = 'localhost' # Get local machine name port =", "message = \"Test message\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) #print 'Response", "valid ws request %s'%wsresponse self.assertNotEqual(wsresponse, '\\'CONNECTION_REJECTED\\'', \"Connection is not rejected\")", "height =jsondata[\"vheight\"] if width > height : screenorientation= 1.00#landscape else", "port is set correctly\") self.assertEqual(self.wsServer.LOCALHOST, \"127.0.0.1\", \"Localhost set to 127.0.0.1\")", "13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"Test", "runner.run(suite) # if __name__ == \"__main__\": # #import sys;sys.argv =", "self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In valid Message rejected\") def test_invalid_Messge(self):", "def test_invalid_Request(self): message= \"Test Message\" self.testsocket.send(message) data = 
repr(self.testsocket.recv(1024)) #print", "socket.SO_REUSEADDR, 1) # Create a socket object host = 'localhost'", "import WebSocketServer as ws from EventService import EventManager as em", "# print data self.assertEqual(data, \"\\'json is received\\'\", \"json Messages is", "b2 = 0 header=\"\" payload_len = len(message) if payload_len <", "data = repr(self.testsocket.recv(1024)) #print 'Response to invalid message<TestMessage> %s'%(data) self.assertEqual(data,", "\"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4582115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0,", "\"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4588125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0,", "'image_2014.3.4_14.40.31.png' in row[0] photoList2 = es.getClosestImages( 65.0601787, 25.4587107, radius )", "photoList: assert 'image_2014.3.4_14.40.32.png' or 'image_2014.3.4_14.40.31.png' in row[0] photoList2 = es.getClosestImages(", "not Empty\") self.testsocket.sendall((\"Test Message\")) data = repr(self.testsocket.recv(1024)) #print 'Response to", "= unittest.TextTestRunner(verbosity=3) runner.run(suite) # if __name__ == \"__main__\": # #import", "es.saveData(jsondata4) es.saveData(jsondata5) es.saveData(jsondata6) radius = 0.0001 photoList = es.getClosestImages( 65.0601787,", "\"Messages with out a type is rejected\") def test_wellformed_Message_for_Text(self): message", "=self.app.post('/closewebsocketserver') assert 'CLOSED' or 'ALREADY_CLOSSED' in rv.data def test_start_websocket(self): rv", "es.saveData(jsondata5) es.saveData(jsondata6) radius = 0.0001 photoList = es.getClosestImages( 65.0601787, 25.4583107,", "from EventService import WebSocketServer as ws from EventService import EventManager", "self.assertEqual(row[0], \"image_2014.3.4_14.40.30.png\", \"Image name is correctly 
set and saved\") self.assertEqual(row[1],", "'select imagename, Browser,devicetype,X(location) as latitude, Y(location) as longitude from Imagedata", "\"vheight\":800} jsondata4 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.34\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4586115, \"lat\":65.0600797,", "\"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4582115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0,", "={\"type\":\"image\", \"time\":\"2014.3.4_14.40.34\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4586115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125},", "list.This is a feature test for location based image data\")", "is not rejected\") self.assertIsNotNone(wsresponse, \"Connection Response is not Empty\") self.testsocket.sendall((\"Test", "= es.dbConnect() def tearDown(self): self.connection.close() def test_data_insert_data_Read(self): self.assertIsInstance(self.connection, MySQLdb.connection, \"Database", "is retrieved and it is not null\") for row in", "\"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata2 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.32\", \"ext\":\"png\",", "self.assertEqual(len(photoList), 4, \"Length of the list should be equal of", "65.0601787, 25.4587107, radius ) self.assertEqual(len(photoList2), 2, \"Length of the list", "import json import EventService import flaskr import tempfile def encodeMessage(", "es from EventService import WebSocketServer as ws from EventService import", "or 'image_2014.3.4_14.40.35.png' in row[0] def suite(): testsuit =unittest.TestSuite() testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request'))", "= '127.0.0.1' # The 
remote host PORT = 17322 class", "HOST = '127.0.0.1' # The remote host PORT = 17322", "is received\\'\", \"json Messages is identified and accepted\") ##TO RUN", "= '0' speed = '0' width = jsondata[\"vwidth\"] height =jsondata[\"vheight\"]", "row in photoList: assert 'image_2014.3.4_14.40.32.png' or 'image_2014.3.4_14.40.31.png' in row[0] photoList2", "identified and accepted\") def test_wellformed_Message_for_Json(self): message = \"GET /mychat HTTP/1.1\\nHost:", "socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Create a socket object host", "\"vheight\":800} alt = str(jsondata[\"position\"][\"alt\"]); if alt==\"None\": alt = '0' heading", "em import socket from base64 import b64encode import struct import", "message= header +message elif (payload_len < ((2 ** 16) -", "local machine name port = 12345 self.testsocket.connect((host, port)) def tearDown(self):", "#print 'Response to invalid message<TestMessage> %s'%(data) self.assertEqual(data, '\\'CONNECTION_REJECTED\\'', \"Invalid Message", "13\\nOrigin: localhost\\n\\n\" # message = \"Test message\" self.testsocket.sendall(message) wsresponse =", "sendAll, use \\n at the end data = repr(self.testsocket.recv(1024)) #", "self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"1<---->Test Message\"))#This line", "=self.app.get('/getAllImageData') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getImageData returns a non", "\"time\":\"2014.3.4_14.40.31\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0,", "\"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} es.saveData(jsondata1) es.saveData(jsondata2) es.saveData(jsondata3) es.saveData(jsondata4) es.saveData(jsondata5) es.saveData(jsondata6) 
radius", "or 'image_2014.3.4_14.40.31.png' in row[0] photoList2 = es.getClosestImages( 65.0601787, 25.4587107, radius", "es.getClosestImages( 65.0601787, 25.4583107, radius ) self.assertEqual(len(photoList), 4, \"Length of the", "as es from EventService import WebSocketServer as ws from EventService", "IS NOT PROVIDED. class TestDatabase(unittest.TestCase): def setUp(self): self.connection = es.dbConnect()", "heading = '0' speed = '0' width = jsondata[\"vwidth\"] height", "data = repr(self.testsocket.recv(1024)) #print 'Response to un encoded Request %s'%(data)", "NECESSARY DATABASES AND TABLES ##ASSISCIATED DATA IS NOT PROVIDED. class", "self.app = EventService.app.test_client() flaskr.init_db() #self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1') def test_rootpath(self): rv", "in rv.data def test_get_All_Image_Data(self): rv =self.app.get('/getAllImageData') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList']", "25.4583105, \"Longitude are saved\") HOST = '127.0.0.1' # The remote", "\"vwidth\":480, \"vheight\":800} jsondata4 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.34\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4586115,", "line seems to get stuck at times. Solution is to", "connection accurately set\") jsondata ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.30\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\",", "a REST Service for 2D3DCapture Server.' 
in rv.data def test_post_image(self):", "\"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata4", "assert 'image_2014.3.4_14.40.32.png' or 'image_2014.3.4_14.40.31.png' in row[0] photoList2 = es.getClosestImages( 65.0601787,", "wsresponse self.testsocket.send(encodeMessage(\"Test Message\"))#This line seems to get stuck at times.", "\"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata4 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.34\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\",", "from time import sleep import EventService as es from EventService", "str(jsondata[\"position\"][\"lat\"])+\" \"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"]) sqlstring2 =\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"]) sqlstring3 = \",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\" sqlstring = sqlstring1", "+= l message = header +message else: b2 |= 127", "class TestDatabase(unittest.TestCase): def setUp(self): self.connection = es.dbConnect() def tearDown(self): self.connection.close()", "HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse", "self.assertNotEqual(wsresponse, '\\'CONNECTION_REJECTED\\'', \"Connection is not rejected\") self.assertIsNotNone(wsresponse, \"Connection Response 
is", "import EventManager as em import socket from base64 import b64encode", "valid Message rejected\") def test_invalid_Messge(self): message = \"GET /mychat HTTP/1.1\\nHost:", "('POINT(\"+ str(jsondata[\"position\"][\"lat\"])+\" \"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"]) sqlstring2 =\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"]) sqlstring3 = \",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\" sqlstring =", "def test_get_All_Image_Data(self): rv =self.app.get('/getAllImageData') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getImageData", "%s'%(data) self.assertEqual(data, '\\'CONNECTION_REJECTED\\'', \"Invalid Message rejected\") def test_valid_WS_Request(self): message =", "\"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata6 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.36\",", "where time=\\'2014.3.4_14.40.31\\'' result = es.dbRead(sqlreadsting) self.assertIsNotNone(result, \"Inserted data is retrieved", "''' import unittest from time import sleep import EventService as", "message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version:", "TestWebSockets(unittest.TestCase): def setUp(self): self.wsServer = ws('',12345,'127.0.0.1') self.wsServer.setRunning(True); sleep(1) self.testsocket =", "\"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata3 
={\"type\":\"image\", \"time\":\"2014.3.4_14.40.33\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\",", ") self.assertEqual(len(photoList), 4, \"Length of the list should be equal", "126 header += chr(b1) header += chr(b2) l = struct.pack(\">H\",", "list should be equal of the second test\") for row", "l = struct.pack(\">H\", payload_len) header += l message = header", "= \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin:", "25.4583107, radius ) self.assertEqual(len(photoList), 4, \"Length of the list should", "es.saveData(jsondata2) es.saveData(jsondata3) es.saveData(jsondata4) es.saveData(jsondata5) es.saveData(jsondata6) radius = 0.0001 photoList =", "self.assertEqual(self.wsServer.PORT, 12345, \"Server port is set correctly\") self.assertEqual(self.wsServer.LOCALHOST, \"127.0.0.1\", \"Localhost", "chr(b2) l = struct.pack(\">Q\", payload_len) header += l message =", "\"time\":\"2014.3.4_14.40.36\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4588125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0,", "in rv.data def test_start_websocket(self): rv =self.app.get('/startwebsocketserver') # print rv.data assert", "desired value\") self.assertEqual(self.wsServer.PORT, 12345, \"Server port is set correctly\") self.assertEqual(self.wsServer.LOCALHOST,", "received\\'\", \"Text Messages is identified and accepted\") def test_wellformed_Message_for_Json(self): message", "\"vheight\":800} jsondata2 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.32\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4582115, \"lat\":65.0600797,", "= self.app.get('/') assert 'This is a REST Service for 2D3DCapture", "def 
test_malformed_Message(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key:", "TO CREATE THE NECESSARY DATABASES AND TABLES ##ASSISCIATED DATA IS", "b64encode import struct import MySQLdb import json import EventService import", "def setUp(self): self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp() EventService.app.config['TESTING'] = True self.app", "# print rv.data assert 'READY' in rv.data def test_post_binary_image(self): rv", "ws from EventService import EventManager as em import socket from", "16) - 1)): b2 |= 126 header += chr(b1) header", "import EventService as es from EventService import WebSocketServer as ws", "data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In valid Message", "\"image_2014.3.4_14.40.30.png\", \"Image name is correctly set and saved\") self.assertEqual(row[1], 65.0600797,", "sendAll, use \\n at the end data = repr(self.testsocket.recv(1024)) self.assertEqual(data,", "None list.This is a feature test for location based image", "\"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0,", "13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"1<---->Test", "=self.app.get('/startwebsocketserver') # print rv.data assert 'READY' in rv.data def test_post_binary_image(self):", "MySQLdb import json import EventService import flaskr import tempfile def", "Media Type' in rv.data def test_get_All_Image_Data(self): rv =self.app.get('/getAllImageData') jsonmsg =", "import tempfile def encodeMessage( message): message = b64encode(message) b1 =0x80", "sqlreadsting = 'select imagename, Browser,devicetype,X(location) as latitude, Y(location) as 
longitude", "EventService import flaskr import tempfile def encodeMessage( message): message =", "rejected\") def test_valid_WS_Request(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection:", "and accepted\") def test_wellformed_Message_for_Json(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade:", "PORT = 17322 class RestServerTestCase(unittest.TestCase): def setUp(self): self.db_fd, flaskr.app.config['DATABASE'] =", "header +message else: b2 |= 127 header += chr(b1) header", "test_clossing_websocket(self): rv =self.app.post('/closewebsocketserver') assert 'CLOSED' or 'ALREADY_CLOSSED' in rv.data def", "server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse =", "\"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4586115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0,", "header += l message = header +message else: b2 |=", "object host = 'localhost' # Get local machine name port", "to the desired value\") self.assertEqual(self.wsServer.PORT, 12345, \"Server port is set", "\"vwidth\":480, \"vheight\":800} jsondata2 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.32\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4582115,", "retrieved and it is not null\") for row in result:", "\"127.0.0.1\", \"Localhost set to 127.0.0.1\") def test_invalid_Request(self): message= \"Test Message\"", "repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'MISFORMATED MESSAGE\\'\", \"Messages with out a type is", "test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER, '', \"Server set to the desired value\") 
self.assertEqual(self.wsServer.PORT,", "self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Create a", "TESTS IT IS EXPECTED HAVE THE DATABASE ##CREATED. DATABASE SCRIPT", "sleep(1) self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Create", "Created on Mar 6, 2014 @author: tharanga ''' import unittest", "sqlstring3 #print(sqlstring) es.dbInsert(sqlstring) sqlreadsting = 'select imagename, Browser,devicetype,X(location) as latitude,", "a non None list.This is a feature test for location", "+= chr(b2) l = struct.pack(\">Q\", payload_len) header += l message", "request %s'%wsresponse self.assertNotEqual(wsresponse, '\\'CONNECTION_REJECTED\\'', \"Connection is not rejected\") self.assertIsNotNone(wsresponse, \"Connection", "\"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\"", "is not Empty\") self.testsocket.sendall((\"Test Message\")) data = repr(self.testsocket.recv(1024)) #print 'Response", "as longitude from Imagedata where time=\\'2014.3.4_14.40.31\\'' result = es.dbRead(sqlreadsting) self.assertIsNotNone(result,", "testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read')) testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return", "\"rg\":6.9698, \"orientation\":\"potrait\"}, 
\"vwidth\":480, \"vheight\":800} jsondata4 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.34\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\",", "= 0 header=\"\" payload_len = len(message) if payload_len < 126", "localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage(\"2<---->Test Message\"))#This line seems to", "is correctly set and saved\") self.assertEqual(row[1], 65.0600797, \"Latitudes are saved\")", "radius ) self.assertEqual(len(photoList2), 2, \"Length of the list should be", "if width > height : screenorientation= 1.00#landscape else : screenorientation=", "\"time\":\"2014.3.4_14.40.35\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4587125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0,", "Message\") data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In valid", "65.0600797, \"Latitudes are saved\") self.assertEqual(row[2], 25.4583105, \"Longitude are saved\") HOST", "end data = repr(self.testsocket.recv(1024)) # print data self.assertEqual(data, \"\\'json is", "rv.data def test_start_websocket(self): rv =self.app.get('/startwebsocketserver') # print rv.data assert 'READY'", "test\") for row in photoList: assert 'image_2014.3.4_14.40.32.png' or 'image_2014.3.4_14.40.31.png' in", "test_data_insert_data_Read(self): self.assertIsInstance(self.connection, MySQLdb.connection, \"Database connection accurately set\") jsondata ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.30\",", "test\") for row in photoList2: assert 'image_2014.3.4_14.40.34.png' or 'image_2014.3.4_14.40.35.png' in", "\"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0,", "screenorientation= 0.00#potrait 
filename = jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"] sqlstring1 = \"INSERT INTO Imagedata", "time=\\'2014.3.4_14.40.31\\'' result = es.dbRead(sqlreadsting) self.assertIsNotNone(result, \"Inserted data is retrieved and", "Create a socket object host = 'localhost' # Get local", "TABLES ##ASSISCIATED DATA IS NOT PROVIDED. class TestDatabase(unittest.TestCase): def setUp(self):", "setUp(self): self.wsServer = ws('',12345,'127.0.0.1') self.wsServer.setRunning(True); sleep(1) self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "##CREATED. DATABASE SCRIPT IS PROVIDED TO CREATE THE NECESSARY DATABASES", "##TO RUN THE FOLLOWING UNIT TESTS IT IS EXPECTED HAVE", "localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"1<---->Test Message\"))#This", "self.assertIsNotNone(jsonmsg['imageList'] , \"getLocationImageData returns a non None list.This is a", ", \"getLocationImageData returns a non None list.This is a feature", "\"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0,", "\"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} es.saveData(jsondata1) es.saveData(jsondata2) es.saveData(jsondata3) es.saveData(jsondata4)", "message\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) #print 'Response to valid ws", "server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse =", "\"Length of the list should be equal of the second", "#self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1') def 
test_rootpath(self): rv = self.app.get('/') assert 'This", "\"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata2 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.32\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\",", "header +message return message class TestWebSockets(unittest.TestCase): def setUp(self): self.wsServer =", "row in photoList2: assert 'image_2014.3.4_14.40.34.png' or 'image_2014.3.4_14.40.35.png' in row[0] def", "2, \"Length of the list should be equal of the", "repr(self.testsocket.recv(1024)) #print 'Response to un encoded Request %s'%(data) self.assertEqual(data, \"\\'Un", "testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read')) testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket'))", "a socket object host = 'localhost' # Get local machine", "rv =self.app.post('/postBinaryImage') assert 'READY' or '415 Unsupported Media Type' in", "'Response to un encoded Request %s'%(data) self.assertEqual(data, \"\\'Un expected opcode\\'\",", "tharanga ''' import unittest from time import sleep import EventService", "alt = str(jsondata[\"position\"][\"alt\"]); if alt==\"None\": alt = '0' heading =", "= self.app.WebSocketServer('',wsport,'127.0.0.1') def test_rootpath(self): rv = self.app.get('/') assert 'This is", "Messages is identified and accepted\") def test_wellformed_Message_for_Json(self): message = \"GET", "non None list\") def test_get_location_Image_Data(self): rv 
=self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg = json.loads(rv.data)", "\"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4584104, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0,", "the list should be equal of the second test\") for", "'415 Unsupported Media Type' in rv.data def test_get_All_Image_Data(self): rv =self.app.get('/getAllImageData')", "Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" # message = \"Test message\"", "set\") jsondata ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.30\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797,", "for row in photoList: assert 'image_2014.3.4_14.40.32.png' or 'image_2014.3.4_14.40.31.png' in row[0]", "0x0f b2 = 0 header=\"\" payload_len = len(message) if payload_len", "data\") def test_closest_Image_retrieval(self): jsondata1 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.31\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\",", "to invalid message<TestMessage> %s'%(data) self.assertEqual(data, '\\'CONNECTION_REJECTED\\'', \"Invalid Message rejected\") def", "test_invalid_Messge(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol:", "\"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480,", "\"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata3 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.33\",", "header += l message = header +message return message class", "message = header +message 
return message class TestWebSockets(unittest.TestCase): def setUp(self):", "first test\") for row in photoList: assert 'image_2014.3.4_14.40.32.png' or 'image_2014.3.4_14.40.31.png'", "+= chr(b1) header += chr(b2) l = struct.pack(\">H\", payload_len) header", "126 : header = struct.pack('>BB', b1, payload_len) message= header +message", "self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Create a socket object host =", "= json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getLocationImageData returns a non None list.This", "non None list.This is a feature test for location based", "self.wsServer.closeConnection(); self.testsocket.close() sleep(1) def test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER, '', \"Server set to", "self.assertEqual(data, \"\\'MISFORMATED MESSAGE\\'\", \"Messages with out a type is rejected\")", "should be equal of the first test\") for row in", "valid Message rejected\") def test_malformed_Message(self): message = \"GET /mychat HTTP/1.1\\nHost:", "\"\\'json is received\\'\", \"json Messages is identified and accepted\") ##TO", "print wsresponse self.testsocket.send(encodeMessage(\"1<---->Test Message\"))#This line seems to get stuck at", "((2 ** 16) - 1)): b2 |= 126 header +=", "Message rejected\") def test_valid_WS_Request(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade:", "def tearDown(self): self.wsServer.closeConnection(); self.testsocket.close() sleep(1) def test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER, '', \"Server", "= repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"Test Message\"))#This line seems to", "jsondata4 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.34\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4586115, \"lat\":65.0600797, \"alt\":-1000,", "Messages is identified and accepted\") ##TO RUN 
THE FOLLOWING UNIT", "= '0' width = jsondata[\"vwidth\"] height =jsondata[\"vheight\"] if width >", "are saved\") self.assertEqual(row[2], 25.4583105, \"Longitude are saved\") HOST = '127.0.0.1'", "self.testsocket.send(encodeMessage(\"Test Message\"))#This line seems to get stuck at times. Solution", "= 'select imagename, Browser,devicetype,X(location) as latitude, Y(location) as longitude from", "to use sendAll, use \\n at the end data =", "data is retrieved and it is not null\") for row", "\"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata3 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.33\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\",", "Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) sleep(1)", "struct import MySQLdb import json import EventService import flaskr import", "saved\") HOST = '127.0.0.1' # The remote host PORT =", "type is rejected\") def test_wellformed_Message_for_Text(self): message = \"GET /mychat HTTP/1.1\\nHost:", "json import EventService import flaskr import tempfile def encodeMessage( message):", "row[0] photoList2 = es.getClosestImages( 65.0601787, 25.4587107, radius ) self.assertEqual(len(photoList2), 2,", "test_wellformed_Message_for_Json(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol:", "for row in photoList2: assert 'image_2014.3.4_14.40.34.png' or 'image_2014.3.4_14.40.35.png' in row[0]", "\"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637,", "\"position\":{\"lon\":25.4584104, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, 
\"gy\":0, \"gz\":0,", "HAVE THE DATABASE ##CREATED. DATABASE SCRIPT IS PROVIDED TO CREATE", "= es.dbRead(sqlreadsting) self.assertIsNotNone(result, \"Inserted data is retrieved and it is", "self.connection.close() def test_data_insert_data_Read(self): self.assertIsInstance(self.connection, MySQLdb.connection, \"Database connection accurately set\") jsondata", "'0' width = jsondata[\"vwidth\"] height =jsondata[\"vheight\"] if width > height", "\\n at the end data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'MISFORMATED MESSAGE\\'\",", "6, 2014 @author: tharanga ''' import unittest from time import", "json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getLocationImageData returns a non None list.This is", "jsondata6 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.36\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4588125, \"lat\":65.0600797, \"alt\":-1000,", "host PORT = 17322 class RestServerTestCase(unittest.TestCase): def setUp(self): self.db_fd, flaskr.app.config['DATABASE']", "= jsondata[\"vwidth\"] height =jsondata[\"vheight\"] if width > height : screenorientation=", "data = repr(self.testsocket.recv(1024)) # print data self.assertEqual(data, \"\\'json is received\\'\",", "tearDown(self): self.connection.close() def test_data_insert_data_Read(self): self.assertIsInstance(self.connection, MySQLdb.connection, \"Database connection accurately set\")", "self.testsocket.connect((host, port)) def tearDown(self): self.wsServer.closeConnection(); self.testsocket.close() sleep(1) def test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER,", "# print wsresponse self.testsocket.send(encodeMessage(\"1<---->Test Message\"))#This line seems to get stuck", "\"Image name is correctly set and saved\") self.assertEqual(row[1], 65.0600797, \"Latitudes", "EventService as es from EventService import WebSocketServer as ws from", "chr(b1) header += chr(b2) l 
= struct.pack(\">H\", payload_len) header +=", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Create a socket object", "for 2D3DCapture Server.' in rv.data def test_post_image(self): rv = self.app.post('/postImage')", "port = 12345 self.testsocket.connect((host, port)) def tearDown(self): self.wsServer.closeConnection(); self.testsocket.close() sleep(1)", "self.assertIsNotNone(result, \"Inserted data is retrieved and it is not null\")", "in row[0] photoList2 = es.getClosestImages( 65.0601787, 25.4587107, radius ) self.assertEqual(len(photoList2),", "localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"Test Message\"))#This", "from EventService import EventManager as em import socket from base64", "sleep(1) self.testsocket.sendall(\"Test Message\") data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'Un expected opcode\\'\",", "jsondata2 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.32\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4582115, \"lat\":65.0600797, \"alt\":-1000,", "'READY' in rv.data def test_clossing_websocket(self): rv =self.app.post('/closewebsocketserver') assert 'CLOSED' or", "self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In valid Message rejected\") def test_malformed_Message(self):", "sleep(1) def test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER, '', \"Server set to the desired", "'READY' in rv.data def test_post_binary_image(self): rv =self.app.post('/postBinaryImage') assert 'READY' or", "def test_valid_WS_Request(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key:", "'Response to valid ws request %s'%wsresponse self.assertNotEqual(wsresponse, 
'\\'CONNECTION_REJECTED\\'', \"Connection is", "RestServerTestCase(unittest.TestCase): def setUp(self): self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp() EventService.app.config['TESTING'] = True", "'READY' or '415 Unsupported Media Type' in rv.data def test_get_All_Image_Data(self):", "the list should be equal of the first test\") for", "import struct import MySQLdb import json import EventService import flaskr", "self.connection = es.dbConnect() def tearDown(self): self.connection.close() def test_data_insert_data_Read(self): self.assertIsInstance(self.connection, MySQLdb.connection,", "def test_clossing_websocket(self): rv =self.app.post('/closewebsocketserver') assert 'CLOSED' or 'ALREADY_CLOSSED' in rv.data", "\"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} es.saveData(jsondata1) es.saveData(jsondata2) es.saveData(jsondata3) es.saveData(jsondata4) es.saveData(jsondata5) es.saveData(jsondata6)", "at the end data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'MISFORMATED MESSAGE\\'\", \"Messages", "in rv.data def test_clossing_websocket(self): rv =self.app.post('/closewebsocketserver') assert 'CLOSED' or 'ALREADY_CLOSSED'", "wsresponse = repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage(\"2<---->Test Message\"))#This line seems to get stuck", "accurately set\") jsondata ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.30\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105,", "& 0x0f b2 = 0 header=\"\" payload_len = len(message) if", "chr(b1) header += chr(b2) l = struct.pack(\">Q\", payload_len) header +=", "PROVIDED. 
class TestDatabase(unittest.TestCase): def setUp(self): self.connection = es.dbConnect() def tearDown(self):", "es.saveData(jsondata3) es.saveData(jsondata4) es.saveData(jsondata5) es.saveData(jsondata6) radius = 0.0001 photoList = es.getClosestImages(", "returns a non None list.This is a feature test for", "\"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata5 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.35\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\",", "jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"] sqlstring1 = \"INSERT INTO Imagedata values (\\'\"+filename+\"\\',GeomFromText ('POINT(\"+ str(jsondata[\"position\"][\"lat\"])+\"", "time import sleep import EventService as es from EventService import", "assert 'READY' in rv.data def test_post_binary_image(self): rv =self.app.post('/postBinaryImage') assert 'READY'", "\"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"]) sqlstring2 =\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"]) sqlstring3 = \",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\" sqlstring = sqlstring1 +", "test_get_All_Image_Data(self): rv =self.app.get('/getAllImageData') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getImageData returns", "sqlstring1 + sqlstring2+ sqlstring3 #print(sqlstring) es.dbInsert(sqlstring) sqlreadsting = 'select imagename,", "= EventService.app.test_client() flaskr.init_db() #self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1') def test_rootpath(self): rv =", 
"1)): b2 |= 126 header += chr(b1) header += chr(b2)", "\"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\"", "test_rootpath(self): rv = self.app.get('/') assert 'This is a REST Service", "/mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message)", "localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) sleep(1) self.testsocket.sendall(\"Test Message\") data =", "is set correctly\") self.assertEqual(self.wsServer.LOCALHOST, \"127.0.0.1\", \"Localhost set to 127.0.0.1\") def", "self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage(\"2<---->Test Message\"))#This line seems to get", "= repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In valid Message rejected\")", "0.00#potrait filename = jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"] sqlstring1 = \"INSERT INTO Imagedata values", "REST Service for 2D3DCapture Server.' 
in rv.data def test_post_image(self): rv", "\\n at the end data = repr(self.testsocket.recv(1024)) print data self.assertEqual(data,", "radius = 0.0001 photoList = es.getClosestImages( 65.0601787, 25.4583107, radius )", "Message\" self.testsocket.send(message) data = repr(self.testsocket.recv(1024)) #print 'Response to invalid message<TestMessage>", "\"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657,", "\"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} es.saveData(jsondata1)", "<KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print", "\"\\'Un expected opcode\\'\", \"In valid Message rejected\") def test_malformed_Message(self): message", "at the end data = repr(self.testsocket.recv(1024)) # print data self.assertEqual(data,", "={\"type\":\"image\", \"time\":\"2014.3.4_14.40.33\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4584104, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125},", "test_valid_WS_Request(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol:", "print data self.assertEqual(data, \"\\'json is received\\'\", \"json Messages is identified", "\"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4586115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0,", "\"position\":{\"lon\":25.4587125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0,", "data = 
repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'MISFORMATED MESSAGE\\'\", \"Messages with out a", "test_wellformed_Message_for_Text(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol:", "is identified and accepted\") def test_wellformed_Message_for_Json(self): message = \"GET /mychat", "= 0.0001 photoList = es.getClosestImages( 65.0601787, 25.4583107, radius ) self.assertEqual(len(photoList),", "suite = suite() runner = unittest.TextTestRunner(verbosity=3) runner.run(suite) # if __name__", "to un encoded Request %s'%(data) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In", "SCRIPT IS PROVIDED TO CREATE THE NECESSARY DATABASES AND TABLES", "rejected\") def test_malformed_Message(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection:", "\"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4582115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0,", "\"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata6 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.36\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\",", "= struct.pack(\">Q\", payload_len) header += l message = header +message", "def test_closest_Image_retrieval(self): jsondata1 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.31\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105,", "use \\n at the end data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'MISFORMATED", "0x1 & 0x0f b2 = 0 header=\"\" payload_len = len(message)", "repr(self.testsocket.recv(1024)) sleep(1) self.testsocket.sendall(\"Test Message\") data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'Un expected", "set and 
saved\") self.assertEqual(row[1], 65.0600797, \"Latitudes are saved\") self.assertEqual(row[2], 25.4583105,", "={\"type\":\"image\", \"time\":\"2014.3.4_14.40.36\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4588125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125},", "+= chr(b2) l = struct.pack(\">H\", payload_len) header += l message", "print wsresponse self.testsocket.send(encodeMessage(\"Test Message\"))#This line seems to get stuck at", "based image data\") def test_closest_Image_retrieval(self): jsondata1 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.31\", \"ext\":\"png\", \"deviceType\":\"Mobile\",", "FOLLOWING UNIT TESTS IT IS EXPECTED HAVE THE DATABASE ##CREATED.", "EventService.app.config['TESTING'] = True self.app = EventService.app.test_client() flaskr.init_db() #self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1')", "b1 =0x80 | 0x1 & 0x0f b2 = 0 header=\"\"", "if payload_len < 126 : header = struct.pack('>BB', b1, payload_len)", "the desired value\") self.assertEqual(self.wsServer.PORT, 12345, \"Server port is set correctly\")", "13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage(\"2<---->Test Message\"))#This line seems", "Y(location) as longitude from Imagedata where time=\\'2014.3.4_14.40.31\\'' result = es.dbRead(sqlreadsting)", "\"az\":0, \"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800}", "THE DATABASE ##CREATED. 
DATABASE SCRIPT IS PROVIDED TO CREATE THE", "== \"__main__\": # #import sys;sys.argv = ['', 'Test.testName'] # unittest.main()", "\"INSERT INTO Imagedata values (\\'\"+filename+\"\\',GeomFromText ('POINT(\"+ str(jsondata[\"position\"][\"lat\"])+\" \"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"]) sqlstring2 =\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"])", "= repr(self.testsocket.recv(1024)) print data self.assertEqual(data, \"\\'Text received\\'\", \"Text Messages is", "repr(self.testsocket.recv(1024)) #print 'Response to valid ws request %s'%wsresponse self.assertNotEqual(wsresponse, '\\'CONNECTION_REJECTED\\'',", "alt==\"None\": alt = '0' heading = '0' speed = '0'", "##ASSISCIATED DATA IS NOT PROVIDED. class TestDatabase(unittest.TestCase): def setUp(self): self.connection", "= sqlstring1 + sqlstring2+ sqlstring3 #print(sqlstring) es.dbInsert(sqlstring) sqlreadsting = 'select", "\"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} es.saveData(jsondata1) es.saveData(jsondata2)", "sendAll, use \\n at the end data = repr(self.testsocket.recv(1024)) print", ") self.assertEqual(len(photoList2), 2, \"Length of the list should be equal", "Upgrade\\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage(\"2<---->Test", "\"\\'Un expected opcode\\'\", \"In valid Message rejected\") def test_invalid_Messge(self): message", "name is correctly set and saved\") self.assertEqual(row[1], 65.0600797, \"Latitudes are", "rv.data def test_get_All_Image_Data(self): rv =self.app.get('/getAllImageData') jsonmsg = json.loads(rv.data) 
self.assertIsNotNone(jsonmsg['imageList'] ,", "\"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata6 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.36\", \"ext\":\"png\", \"deviceType\":\"Mobile\",", "of the first test\") for row in photoList: assert 'image_2014.3.4_14.40.32.png'", "l = struct.pack(\">Q\", payload_len) header += l message = header", "websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" # message = \"Test", "chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" # message = \"Test message\" self.testsocket.sendall(message) wsresponse", "setUp(self): self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp() EventService.app.config['TESTING'] = True self.app =", "= repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"1<---->Test Message\"))#This line seems to", "the end data = repr(self.testsocket.recv(1024)) # print data self.assertEqual(data, \"\\'json", "+message elif (payload_len < ((2 ** 16) - 1)): b2", "rv.data assert 'READY' in rv.data def test_post_binary_image(self): rv =self.app.post('/postBinaryImage') assert", "'image_2014.3.4_14.40.32.png' or 'image_2014.3.4_14.40.31.png' in row[0] photoList2 = es.getClosestImages( 65.0601787, 25.4587107,", "__name__ == \"__main__\": # #import sys;sys.argv = ['', 'Test.testName'] #", "l message = header +message return message class TestWebSockets(unittest.TestCase): def", "message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version:", "EventService.app.test_client() flaskr.init_db() #self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1') def test_rootpath(self): rv = self.app.get('/')", "Request %s'%(data) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In valid Message 
rejected\")", "DATABASES AND TABLES ##ASSISCIATED DATA IS NOT PROVIDED. class TestDatabase(unittest.TestCase):", "in row[0] def suite(): testsuit =unittest.TestSuite() testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request'))", "\"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4587125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0,", "as ws from EventService import EventManager as em import socket", "\"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata2", "it is not null\") for row in result: self.assertEqual(row[0], \"image_2014.3.4_14.40.30.png\",", "\"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata4 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.34\", \"ext\":\"png\", \"deviceType\":\"Mobile\",", "end data = repr(self.testsocket.recv(1024)) print data self.assertEqual(data, \"\\'Text received\\'\", \"Text", "self.wsServer = ws('',12345,'127.0.0.1') self.wsServer.setRunning(True); sleep(1) self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET,", "import EventService import flaskr import tempfile def encodeMessage( message): message", "\"\\'Text received\\'\", \"Text Messages is identified and accepted\") def test_wellformed_Message_for_Json(self):", "= es.getClosestImages( 65.0601787, 25.4583107, radius ) self.assertEqual(len(photoList), 4, \"Length of", "testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return testsuit suite = suite() runner =", 
"\"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata3 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.33\", \"ext\":\"png\", \"deviceType\":\"Mobile\",", "2014 @author: tharanga ''' import unittest from time import sleep", "self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) sleep(1) self.testsocket.sendall(\"Test Message\") data = repr(self.testsocket.recv(1024))", "unittest from time import sleep import EventService as es from", "Server.' in rv.data def test_post_image(self): rv = self.app.post('/postImage') assert 'READY'", "self.assertIsNotNone(wsresponse, \"Connection Response is not Empty\") self.testsocket.sendall((\"Test Message\")) data =", "repr(self.testsocket.recv(1024)) # print data self.assertEqual(data, \"\\'json is received\\'\", \"json Messages", "str(jsondata[\"position\"][\"alt\"]); if alt==\"None\": alt = '0' heading = '0' speed", "on Mar 6, 2014 @author: tharanga ''' import unittest from", "a feature test for location based image data\") def test_closest_Image_retrieval(self):", "test_post_image(self): rv = self.app.post('/postImage') assert 'READY' in rv.data def test_clossing_websocket(self):", "remote host PORT = 17322 class RestServerTestCase(unittest.TestCase): def setUp(self): self.db_fd,", "photoList2: assert 'image_2014.3.4_14.40.34.png' or 'image_2014.3.4_14.40.35.png' in row[0] def suite(): testsuit", "def suite(): testsuit =unittest.TestSuite() testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text'))", "to valid ws request %s'%wsresponse self.assertNotEqual(wsresponse, '\\'CONNECTION_REJECTED\\'', \"Connection is not", "\"deviceOS\":\"Badda\", 
\"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4587125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0,", "a non None list\") def test_get_location_Image_Data(self): rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg =", "\"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata5 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.35\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\",", "jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getImageData returns a non None", "\"json Messages is identified and accepted\") ##TO RUN THE FOLLOWING", "={\"type\":\"image\", \"time\":\"2014.3.4_14.40.35\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4587125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125},", "# message = \"Test message\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) #print", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Create a socket", "# Get local machine name port = 12345 self.testsocket.connect((host, port))", "self.testsocket.close() sleep(1) def test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER, '', \"Server set to the", "#print(sqlstring) es.dbInsert(sqlstring) sqlreadsting = 'select imagename, Browser,devicetype,X(location) as latitude, Y(location)", "repr(self.testsocket.recv(1024)) #print 'Response to invalid message<TestMessage> %s'%(data) self.assertEqual(data, '\\'CONNECTION_REJECTED\\'', \"Invalid", "header=\"\" payload_len = len(message) if payload_len < 126 : header", "\"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata2 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.32\",", 
"\"vwidth\":480, \"vheight\":800} jsondata6 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.36\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4588125,", "(\\'\"+filename+\"\\',GeomFromText ('POINT(\"+ str(jsondata[\"position\"][\"lat\"])+\" \"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"]) sqlstring2 =\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"]) sqlstring3 = \",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\" sqlstring", "of the list should be equal of the second test\")", "server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse =", "True self.app = EventService.app.test_client() flaskr.init_db() #self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1') def test_rootpath(self):", "self.testsocket.sendall((\"Test Message\")) data = repr(self.testsocket.recv(1024)) #print 'Response to un encoded", "# Create a socket object host = 'localhost' # Get", "+ sqlstring2+ sqlstring3 #print(sqlstring) es.dbInsert(sqlstring) sqlreadsting = 'select imagename, Browser,devicetype,X(location)", "or 'ALREADY_CLOSSED' in rv.data def test_start_websocket(self): rv =self.app.get('/startwebsocketserver') # print", "= True self.app = EventService.app.test_client() flaskr.init_db() #self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1') def", "name port = 12345 self.testsocket.connect((host, port)) 
def tearDown(self): self.wsServer.closeConnection(); self.testsocket.close()", "latitude, Y(location) as longitude from Imagedata where time=\\'2014.3.4_14.40.31\\'' result =", "\"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} es.saveData(jsondata1) es.saveData(jsondata2) es.saveData(jsondata3) es.saveData(jsondata4) es.saveData(jsondata5)", "in photoList: assert 'image_2014.3.4_14.40.32.png' or 'image_2014.3.4_14.40.31.png' in row[0] photoList2 =", "25.4587107, radius ) self.assertEqual(len(photoList2), 2, \"Length of the list should", "AND TABLES ##ASSISCIATED DATA IS NOT PROVIDED. class TestDatabase(unittest.TestCase): def", "is not null\") for row in result: self.assertEqual(row[0], \"image_2014.3.4_14.40.30.png\", \"Image", "\"Connection Response is not Empty\") self.testsocket.sendall((\"Test Message\")) data = repr(self.testsocket.recv(1024))", "import b64encode import struct import MySQLdb import json import EventService", "b2 |= 126 header += chr(b1) header += chr(b2) l", "ws request %s'%wsresponse self.assertNotEqual(wsresponse, '\\'CONNECTION_REJECTED\\'', \"Connection is not rejected\") self.assertIsNotNone(wsresponse,", "jsondata3 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.33\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4584104, \"lat\":65.0600797, \"alt\":-1000,", "flaskr import tempfile def encodeMessage( message): message = b64encode(message) b1", "= b64encode(message) b1 =0x80 | 0x1 & 0x0f b2 =", "identified and accepted\") ##TO RUN THE FOLLOWING UNIT TESTS IT", "sqlstring3 = \",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\" sqlstring = 
sqlstring1 + sqlstring2+ sqlstring3 #print(sqlstring)", "message<TestMessage> %s'%(data) self.assertEqual(data, '\\'CONNECTION_REJECTED\\'', \"Invalid Message rejected\") def test_valid_WS_Request(self): message", "the end data = repr(self.testsocket.recv(1024)) print data self.assertEqual(data, \"\\'Text received\\'\",", "assert 'READY' in rv.data def test_clossing_websocket(self): rv =self.app.post('/closewebsocketserver') assert 'CLOSED'", "\"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4586115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0,", "assert 'READY' or '415 Unsupported Media Type' in rv.data def", "setUp(self): self.connection = es.dbConnect() def tearDown(self): self.connection.close() def test_data_insert_data_Read(self): self.assertIsInstance(self.connection,", "self.testsocket.send(encodeMessage(\"2<---->Test Message\"))#This line seems to get stuck at times. Solution", "'\\'CONNECTION_REJECTED\\'', \"Invalid Message rejected\") def test_valid_WS_Request(self): message = \"GET /mychat", "class RestServerTestCase(unittest.TestCase): def setUp(self): self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp() EventService.app.config['TESTING'] =", "| 0x1 & 0x0f b2 = 0 header=\"\" payload_len =", "rv =self.app.get('/startwebsocketserver') # print rv.data assert 'READY' in rv.data def", "\"vheight\":800} es.saveData(jsondata1) es.saveData(jsondata2) es.saveData(jsondata3) es.saveData(jsondata4) es.saveData(jsondata5) es.saveData(jsondata6) radius = 0.0001", "times. 
Solution is to use sendAll, use \\n at the", "\"Connection is not rejected\") self.assertIsNotNone(wsresponse, \"Connection Response is not Empty\")", "jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getLocationImageData returns a non None", "self.app.post('/postImage') assert 'READY' in rv.data def test_clossing_websocket(self): rv =self.app.post('/closewebsocketserver') assert", "should be equal of the second test\") for row in", "RUN THE FOLLOWING UNIT TESTS IT IS EXPECTED HAVE THE", "sqlstring2+ sqlstring3 #print(sqlstring) es.dbInsert(sqlstring) sqlreadsting = 'select imagename, Browser,devicetype,X(location) as", "# The remote host PORT = 17322 class RestServerTestCase(unittest.TestCase): def", "alt = '0' heading = '0' speed = '0' width", "17322 class RestServerTestCase(unittest.TestCase): def setUp(self): self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp() EventService.app.config['TESTING']", "\"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata5", "\"Latitudes are saved\") self.assertEqual(row[2], 25.4583105, \"Longitude are saved\") HOST =", "message): message = b64encode(message) b1 =0x80 | 0x1 & 0x0f", "use \\n at the end data = repr(self.testsocket.recv(1024)) print data", "accepted\") ##TO RUN THE FOLLOWING UNIT TESTS IT IS EXPECTED", "header += chr(b1) header += chr(b2) l = struct.pack(\">H\", payload_len)", "return message class TestWebSockets(unittest.TestCase): def setUp(self): self.wsServer = ws('',12345,'127.0.0.1') self.wsServer.setRunning(True);", "repr(self.testsocket.recv(1024)) print data self.assertEqual(data, \"\\'Text received\\'\", \"Text Messages is identified", "message = b64encode(message) b1 =0x80 | 0x1 & 0x0f b2", "\"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: 
localhost\\n\\n\"", "\"vwidth\":480, \"vheight\":800} jsondata3 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.33\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4584104,", "header += chr(b2) l = struct.pack(\">Q\", payload_len) header += l", "as em import socket from base64 import b64encode import struct", "Unsupported Media Type' in rv.data def test_get_All_Image_Data(self): rv =self.app.get('/getAllImageData') jsonmsg", "MySQLdb.connection, \"Database connection accurately set\") jsondata ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.30\", \"ext\":\"png\", \"deviceType\":\"Mobile\",", "\"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4584104, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0,", "print rv.data assert 'READY' in rv.data def test_post_binary_image(self): rv =self.app.post('/postBinaryImage')", "def test_get_location_Image_Data(self): rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getLocationImageData", "are saved\") HOST = '127.0.0.1' # The remote host PORT", "tearDown(self): self.wsServer.closeConnection(); self.testsocket.close() sleep(1) def test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER, '', \"Server set", "\"\\'MISFORMATED MESSAGE\\'\", \"Messages with out a type is rejected\") def", "return testsuit suite = suite() runner = unittest.TextTestRunner(verbosity=3) runner.run(suite) #", "EventManager as em import socket from base64 import b64encode import", "= ws('',12345,'127.0.0.1') self.wsServer.setRunning(True); sleep(1) self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,", "photoList2 = es.getClosestImages( 65.0601787, 25.4587107, radius ) 
self.assertEqual(len(photoList2), 2, \"Length", "self.assertIsNotNone(jsonmsg['imageList'] , \"getImageData returns a non None list\") def test_get_location_Image_Data(self):", "localhost\\n\\n\" # message = \"Test message\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024))", "=self.app.post('/postBinaryImage') assert 'READY' or '415 Unsupported Media Type' in rv.data", "self.assertEqual(row[1], 65.0600797, \"Latitudes are saved\") self.assertEqual(row[2], 25.4583105, \"Longitude are saved\")", "\"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata6 ={\"type\":\"image\",", "to get stuck at times. Solution is to use sendAll,", "<KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) sleep(1) self.testsocket.sendall(\"Test", "self.assertEqual(self.wsServer.LOCALHOST, \"127.0.0.1\", \"Localhost set to 127.0.0.1\") def test_invalid_Request(self): message= \"Test", "= struct.pack('>BB', b1, payload_len) message= header +message elif (payload_len <", "is rejected\") def test_wellformed_Message_for_Text(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade:", "12345 self.testsocket.connect((host, port)) def tearDown(self): self.wsServer.closeConnection(); self.testsocket.close() sleep(1) def test_webSocketServerOBject(self):", "self.assertEqual(row[2], 25.4583105, \"Longitude are saved\") HOST = '127.0.0.1' # The", "b64encode(message) b1 =0x80 | 0x1 & 0x0f b2 = 0", "b2 |= 127 header += chr(b1) header += chr(b2) l", "server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" # message =", "es.dbRead(sqlreadsting) self.assertIsNotNone(result, \"Inserted data is retrieved and it is not", "null\") for row in 
result: self.assertEqual(row[0], \"image_2014.3.4_14.40.30.png\", \"Image name is", "rv.data def test_post_image(self): rv = self.app.post('/postImage') assert 'READY' in rv.data", "\"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} alt", "\"time\":\"2014.3.4_14.40.34\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4586115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0,", "rv = self.app.post('/postImage') assert 'READY' in rv.data def test_clossing_websocket(self): rv", "= str(jsondata[\"position\"][\"alt\"]); if alt==\"None\": alt = '0' heading = '0'", "jsondata[\"vwidth\"] height =jsondata[\"vheight\"] if width > height : screenorientation= 1.00#landscape", "'', \"Server set to the desired value\") self.assertEqual(self.wsServer.PORT, 12345, \"Server", "\"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata5 ={\"type\":\"image\",", "get stuck at times. 
Solution is to use sendAll, use", "EventService import EventManager as em import socket from base64 import", "repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"Test Message\"))#This line seems to get", "message class TestWebSockets(unittest.TestCase): def setUp(self): self.wsServer = ws('',12345,'127.0.0.1') self.wsServer.setRunning(True); sleep(1)", "'127.0.0.1' # The remote host PORT = 17322 class RestServerTestCase(unittest.TestCase):", "def test_wellformed_Message_for_Text(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key:", "result = es.dbRead(sqlreadsting) self.assertIsNotNone(result, \"Inserted data is retrieved and it", "self.assertIsInstance(self.connection, MySQLdb.connection, \"Database connection accurately set\") jsondata ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.30\", \"ext\":\"png\",", "set to 127.0.0.1\") def test_invalid_Request(self): message= \"Test Message\" self.testsocket.send(message) data", "test_malformed_Message(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol:", "def test_start_websocket(self): rv =self.app.get('/startwebsocketserver') # print rv.data assert 'READY' in", "a type is rejected\") def test_wellformed_Message_for_Text(self): message = \"GET /mychat", "HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse", "equal of the second test\") for row in photoList2: assert", "\"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata6", "= len(message) if payload_len < 126 : header = struct.pack('>BB',", "and accepted\") ##TO RUN 
THE FOLLOWING UNIT TESTS IT IS", "payload_len) header += l message = header +message else: b2", "= \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin:", "accepted\") def test_wellformed_Message_for_Json(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection:", "={\"type\":\"image\", \"time\":\"2014.3.4_14.40.30\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125},", "and it is not null\") for row in result: self.assertEqual(row[0],", "test_invalid_Request(self): message= \"Test Message\" self.testsocket.send(message) data = repr(self.testsocket.recv(1024)) #print 'Response", "wsresponse = repr(self.testsocket.recv(1024)) #print 'Response to valid ws request %s'%wsresponse", "\"time\":\"2014.3.4_14.40.33\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4584104, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0,", "self.testsocket.send(message) data = repr(self.testsocket.recv(1024)) #print 'Response to invalid message<TestMessage> %s'%(data)", "returns a non None list\") def test_get_location_Image_Data(self): rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg", "+message return message class TestWebSockets(unittest.TestCase): def setUp(self): self.wsServer = ws('',12345,'127.0.0.1')", "={\"type\":\"image\", \"time\":\"2014.3.4_14.40.32\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4582115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125},", "\"getLocationImageData returns a non None list.This is a feature test", 
"es.saveData(jsondata1) es.saveData(jsondata2) es.saveData(jsondata3) es.saveData(jsondata4) es.saveData(jsondata5) es.saveData(jsondata6) radius = 0.0001 photoList", "\"Longitude are saved\") HOST = '127.0.0.1' # The remote host", "\"Server set to the desired value\") self.assertEqual(self.wsServer.PORT, 12345, \"Server port", "\"Invalid Message rejected\") def test_valid_WS_Request(self): message = \"GET /mychat HTTP/1.1\\nHost:", "for row in result: self.assertEqual(row[0], \"image_2014.3.4_14.40.30.png\", \"Image name is correctly", "\"getImageData returns a non None list\") def test_get_location_Image_Data(self): rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105')", "suite(): testsuit =unittest.TestSuite() testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json'))", "jsondata1 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.31\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000,", "testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return testsuit suite", "width > height : screenorientation= 1.00#landscape else : screenorientation= 0.00#potrait", "flaskr.app.config['DATABASE'] = 
tempfile.mkstemp() EventService.app.config['TESTING'] = True self.app = EventService.app.test_client() flaskr.init_db()", "\"In valid Message rejected\") def test_malformed_Message(self): message = \"GET /mychat", "EXPECTED HAVE THE DATABASE ##CREATED. DATABASE SCRIPT IS PROVIDED TO", "data self.assertEqual(data, \"\\'json is received\\'\", \"json Messages is identified and", "self.assertEqual(data, \"\\'Text received\\'\", \"Text Messages is identified and accepted\") def", "\"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata3 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.33\", \"ext\":\"png\",", "\"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata6 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.36\", \"ext\":\"png\",", "import flaskr import tempfile def encodeMessage( message): message = b64encode(message)", "\"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata5 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.35\", \"ext\":\"png\",", "at the end data = repr(self.testsocket.recv(1024)) print data self.assertEqual(data, \"\\'Text", "testsuit suite = suite() runner = unittest.TextTestRunner(verbosity=3) runner.run(suite) # if", "= header +message return message class TestWebSockets(unittest.TestCase): def setUp(self): self.wsServer", "\"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata4 ={\"type\":\"image\",", "< 126 : header = struct.pack('>BB', b1, payload_len) message= header", "screenorientation= 1.00#landscape else : screenorientation= 0.00#potrait filename = jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"] sqlstring1", "websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 
13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024))", "sqlstring1 = \"INSERT INTO Imagedata values (\\'\"+filename+\"\\',GeomFromText ('POINT(\"+ str(jsondata[\"position\"][\"lat\"])+\" \"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"])", "chr(b2) l = struct.pack(\">H\", payload_len) header += l message =", "\"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} es.saveData(jsondata1) es.saveData(jsondata2) es.saveData(jsondata3)", "def setUp(self): self.connection = es.dbConnect() def tearDown(self): self.connection.close() def test_data_insert_data_Read(self):", "\"Length of the list should be equal of the first", "testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read')) testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image'))", "\"Text Messages is identified and accepted\") def test_wellformed_Message_for_Json(self): message =", "not rejected\") self.assertIsNotNone(wsresponse, \"Connection Response is not Empty\") self.testsocket.sendall((\"Test Message\"))", "tempfile def encodeMessage( message): message = b64encode(message) b1 =0x80 |", "\"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata4 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.34\", \"ext\":\"png\",", "self.app.get('/') assert 'This is a REST Service for 
2D3DCapture Server.'", "'Response to invalid message<TestMessage> %s'%(data) self.assertEqual(data, '\\'CONNECTION_REJECTED\\'', \"Invalid Message rejected\")", "= json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getImageData returns a non None list\")", "jsondata ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.30\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000,", "\"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata2 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.32\", \"ext\":\"png\", \"deviceType\":\"Mobile\",", "def test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER, '', \"Server set to the desired value\")", "chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) sleep(1) self.testsocket.sendall(\"Test Message\")", "= '0' heading = '0' speed = '0' width =", "=\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"]) sqlstring3 = \",\"+str(jsondata[\"device\"][\"ra\"])+\",\"+str(jsondata[\"device\"][\"rb\"])+\",\"+str(jsondata[\"device\"][\"rg\"])+\",\"+str(screenorientation)+\",\\'\"+jsondata[\"device\"][\"orientation\"]+\"\\',now(),\\'\"+str(jsondata[\"deviceOS\"])+\"\\',\\'\"+str(jsondata[\"browsertype\"])+\"\\',\\'\"+str(jsondata[\"deviceType\"])+\"\\');\" sqlstring = sqlstring1 + sqlstring2+ sqlstring3", "assert 'This is a REST Service for 2D3DCapture Server.' 
in", "\"In valid Message rejected\") def test_invalid_Messge(self): message = \"GET /mychat", "12345, \"Server port is set correctly\") self.assertEqual(self.wsServer.LOCALHOST, \"127.0.0.1\", \"Localhost set", "self.testsocket.sendall(\"Test Message\") data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In", "INTO Imagedata values (\\'\"+filename+\"\\',GeomFromText ('POINT(\"+ str(jsondata[\"position\"][\"lat\"])+\" \"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"]) sqlstring2 =\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"]) sqlstring3", "test_get_location_Image_Data(self): rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getLocationImageData returns", "\"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata5 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.35\", \"ext\":\"png\", \"deviceType\":\"Mobile\",", "def test_wellformed_Message_for_Json(self): message = \"GET /mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key:", "\"vheight\":800} jsondata3 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.33\", \"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4584104, \"lat\":65.0600797,", "list should be equal of the first test\") for row", "=jsondata[\"vheight\"] if width > height : screenorientation= 1.00#landscape else :", "self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) #print 'Response to valid ws request", "the end data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'MISFORMATED MESSAGE\\'\", \"Messages with", "\"acc\":48.38800048828125}, \"device\":{\"ax\":0, 
\"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698,", "\"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} alt =", "\"ext\":\"png\", \"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4587125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0,", "longitude from Imagedata where time=\\'2014.3.4_14.40.31\\'' result = es.dbRead(sqlreadsting) self.assertIsNotNone(result, \"Inserted", "encodeMessage( message): message = b64encode(message) b1 =0x80 | 0x1 &", "def test_data_insert_data_Read(self): self.assertIsInstance(self.connection, MySQLdb.connection, \"Database connection accurately set\") jsondata ={\"type\":\"image\",", "un encoded Request %s'%(data) self.assertEqual(data, \"\\'Un expected opcode\\'\", \"In valid", "Get local machine name port = 12345 self.testsocket.connect((host, port)) def", "struct.pack(\">Q\", payload_len) header += l message = header +message return", "is identified and accepted\") ##TO RUN THE FOLLOWING UNIT TESTS", "payload_len < 126 : header = struct.pack('>BB', b1, payload_len) message=", "\"gy\":0, \"gz\":0, \"ra\":210.5637, \"rb\":47.5657, \"rg\":6.9698, \"orientation\":\"potrait\"}, \"vwidth\":480, \"vheight\":800} jsondata3 ={\"type\":\"image\",", "\"position\":{\"lon\":25.4583105, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0,", "repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"1<---->Test Message\"))#This line seems to get", "testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) 
testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return testsuit suite = suite()", "if __name__ == \"__main__\": # #import sys;sys.argv = ['', 'Test.testName']", "\\n at the end data = repr(self.testsocket.recv(1024)) # print data", "speed = '0' width = jsondata[\"vwidth\"] height =jsondata[\"vheight\"] if width", "assert 'CLOSED' or 'ALREADY_CLOSSED' in rv.data def test_start_websocket(self): rv =self.app.get('/startwebsocketserver')", "payload_len = len(message) if payload_len < 126 : header =", "invalid message<TestMessage> %s'%(data) self.assertEqual(data, '\\'CONNECTION_REJECTED\\'', \"Invalid Message rejected\") def test_valid_WS_Request(self):", "base64 import b64encode import struct import MySQLdb import json import", "be equal of the first test\") for row in photoList:", "'0' heading = '0' speed = '0' width = jsondata[\"vwidth\"]", "testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return testsuit suite = suite() runner = unittest.TextTestRunner(verbosity=3) runner.run(suite)", "sleep import EventService as es from EventService import WebSocketServer as", "def tearDown(self): self.connection.close() def test_data_insert_data_Read(self): self.assertIsInstance(self.connection, MySQLdb.connection, \"Database connection accurately", "message = header +message else: b2 |= 127 header +=", "127 header += chr(b1) header += chr(b2) l = struct.pack(\">Q\",", "HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse", "'0' speed = '0' width = jsondata[\"vwidth\"] height =jsondata[\"vheight\"] if", "= suite() runner = unittest.TextTestRunner(verbosity=3) runner.run(suite) # if __name__ ==", "the second test\") for row in photoList2: assert 'image_2014.3.4_14.40.34.png' or", "TestDatabase(unittest.TestCase): def setUp(self): self.connection = 
es.dbConnect() def tearDown(self): self.connection.close() def", "height : screenorientation= 1.00#landscape else : screenorientation= 0.00#potrait filename =", "Empty\") self.testsocket.sendall((\"Test Message\")) data = repr(self.testsocket.recv(1024)) #print 'Response to un", "= repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'MISFORMATED MESSAGE\\'\", \"Messages with out a type", "/mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>==\\nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message)", "(payload_len < ((2 ** 16) - 1)): b2 |= 126", "rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getLocationImageData returns a", "= 12345 self.testsocket.connect((host, port)) def tearDown(self): self.wsServer.closeConnection(); self.testsocket.close() sleep(1) def", "at times. 
Solution is to use sendAll, use \\n at", "+= l message = header +message return message class TestWebSockets(unittest.TestCase):", "\"Inserted data is retrieved and it is not null\") for", "is a feature test for location based image data\") def", "= repr(self.testsocket.recv(1024)) #print 'Response to un encoded Request %s'%(data) self.assertEqual(data,", "and saved\") self.assertEqual(row[1], 65.0600797, \"Latitudes are saved\") self.assertEqual(row[2], 25.4583105, \"Longitude", "test_start_websocket(self): rv =self.app.get('/startwebsocketserver') # print rv.data assert 'READY' in rv.data", "0.0001 photoList = es.getClosestImages( 65.0601787, 25.4583107, radius ) self.assertEqual(len(photoList), 4,", "'image_2014.3.4_14.40.34.png' or 'image_2014.3.4_14.40.35.png' in row[0] def suite(): testsuit =unittest.TestSuite() testsuit.addTest(TestWebSockets('test_webSocketServerOBject'))", "header += chr(b1) header += chr(b2) l = struct.pack(\">Q\", payload_len)", "= repr(self.testsocket.recv(1024)) sleep(1) self.testsocket.sendall(\"Test Message\") data = repr(self.testsocket.recv(1024)) self.assertEqual(data, \"\\'Un", "|= 127 header += chr(b1) header += chr(b2) l =", "correctly\") self.assertEqual(self.wsServer.LOCALHOST, \"127.0.0.1\", \"Localhost set to 127.0.0.1\") def test_invalid_Request(self): message=", "self.testsocket.send(encodeMessage(\"1<---->Test Message\"))#This line seems to get stuck at times. Solution", "json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , \"getImageData returns a non None list\") def", "'This is a REST Service for 2D3DCapture Server.' 
in rv.data", "set to the desired value\") self.assertEqual(self.wsServer.PORT, 12345, \"Server port is", "65.0601787, 25.4583107, radius ) self.assertEqual(len(photoList), 4, \"Length of the list", "def test_post_image(self): rv = self.app.post('/postImage') assert 'READY' in rv.data def", "= header +message else: b2 |= 127 header += chr(b1)", "\"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4587125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0,", "\"Test message\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) #print 'Response to valid", "the first test\") for row in photoList: assert 'image_2014.3.4_14.40.32.png' or", "runner = unittest.TextTestRunner(verbosity=3) runner.run(suite) # if __name__ == \"__main__\": #", "DATA IS NOT PROVIDED. class TestDatabase(unittest.TestCase): def setUp(self): self.connection =", "else : screenorientation= 0.00#potrait filename = jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"] sqlstring1 = \"INSERT", "in photoList2: assert 'image_2014.3.4_14.40.34.png' or 'image_2014.3.4_14.40.35.png' in row[0] def suite():", "testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return testsuit suite = suite() runner", "Imagedata values (\\'\"+filename+\"\\',GeomFromText ('POINT(\"+ str(jsondata[\"position\"][\"lat\"])+\" \"+str(jsondata[\"position\"][\"lon\"])+\")'),\"+str(jsondata[\"position\"][\"alt\"])+\",\"+str(jsondata[\"position\"][\"acc\"]) sqlstring2 =\",\"+str(jsondata[\"device\"][\"gx\"])+\",\"+str(jsondata[\"device\"][\"gy\"])+\",\"+str(jsondata[\"device\"][\"gz\"]) sqlstring3 =", "rejected\") self.assertIsNotNone(wsresponse, \"Connection Response is not Empty\") 
self.testsocket.sendall((\"Test Message\")) data", "tempfile.mkstemp() EventService.app.config['TESTING'] = True self.app = EventService.app.test_client() flaskr.init_db() #self.socketServer =", "\"position\":{\"lon\":25.4588125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0,", "expected opcode\\'\", \"In valid Message rejected\") def test_malformed_Message(self): message =", "/mychat HTTP/1.1\\nHost: server.example.com\\nUpgrade: websocket\\nConnection: Upgrade\\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\\nSec-WebSocket-Version: 13\\nOrigin: localhost\\n\\n\" #", "socket from base64 import b64encode import struct import MySQLdb import", "'CLOSED' or 'ALREADY_CLOSSED' in rv.data def test_start_websocket(self): rv =self.app.get('/startwebsocketserver') #", "self.wsServer.setRunning(True); sleep(1) self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #", "%s'%wsresponse self.assertNotEqual(wsresponse, '\\'CONNECTION_REJECTED\\'', \"Connection is not rejected\") self.assertIsNotNone(wsresponse, \"Connection Response", "data = repr(self.testsocket.recv(1024)) print data self.assertEqual(data, \"\\'Text received\\'\", \"Text Messages", "self.assertEqual(self.wsServer.SERVER, '', \"Server set to the desired value\") self.assertEqual(self.wsServer.PORT, 12345,", "Browser,devicetype,X(location) as latitude, Y(location) as longitude from Imagedata where time=\\'2014.3.4_14.40.31\\''", "socket object host = 'localhost' # Get local machine name", "l message = header +message else: b2 |= 127 header", "PROVIDED TO CREATE THE NECESSARY DATABASES AND TABLES ##ASSISCIATED DATA", "location based image data\") def test_closest_Image_retrieval(self): jsondata1 ={\"type\":\"image\", \"time\":\"2014.3.4_14.40.31\", \"ext\":\"png\",", "of the second test\") for row in photoList2: assert 
'image_2014.3.4_14.40.34.png'", "= repr(self.testsocket.recv(1024)) #print 'Response to valid ws request %s'%wsresponse self.assertNotEqual(wsresponse,", "\"deviceType\":\"Mobile\", \"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4588125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0,", "testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return testsuit suite =", "13\\nOrigin: localhost\\n\\n\" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) sleep(1) self.testsocket.sendall(\"Test Message\") data", "as latitude, Y(location) as longitude from Imagedata where time=\\'2014.3.4_14.40.31\\'' result", "print data self.assertEqual(data, \"\\'Text received\\'\", \"Text Messages is identified and", "width = jsondata[\"vwidth\"] height =jsondata[\"vheight\"] if width > height :", ": screenorientation= 0.00#potrait filename = jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"] sqlstring1 = \"INSERT INTO", "\"position\":{\"lon\":25.4586115, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0, \"gy\":0, \"gz\":0,", "#print 'Response to un encoded Request %s'%(data) self.assertEqual(data, \"\\'Un expected", "filename = jsondata[\"type\"]+\"_\"+jsondata[\"time\"]+\".\"+jsondata[\"ext\"] sqlstring1 = \"INSERT INTO Imagedata values (\\'\"+filename+\"\\',GeomFromText", "self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage(\"Test Message\"))#This line", "Message\"))#This line seems to get 
stuck at times. Solution is", "testsuit =unittest.TestSuite() testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read'))", "\"deviceOS\":\"Badda\", \"browsertype\":\"Firefox\", \"position\":{\"lon\":25.4588125, \"lat\":65.0600797, \"alt\":-1000, \"acc\":48.38800048828125}, \"device\":{\"ax\":0, \"ay\":0, \"az\":0, \"gx\":0,", "testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read')) testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data'))" ]
[ "from OpenDrive.client_side import file_changes_json as c_json from OpenDrive.client_side import interface", "h_clear_init_all_folders, h_create_empty class TestMain(unittest.TestCase): def setUp(self) -> None: h_clear_init_all_folders() self._server_process", "time.sleep(2) # wait till changes.json is created interface.add_sync_folder(self.folder1_abs_local_path, \"folder1\") expected_content", "\"folder1\") h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME = 1 def tearDown(self) -> None: main.shutdown()", "threading import time import unittest from OpenDrive.client_side import file_changes_json as", "= h_register_dummy_user_device_client() main_thread = threading.Thread(target=main.start, daemon=True) main_thread.start() time.sleep(2) # wait", "finished expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), \"folder1/dummy.txt\") self.assertTrue(os.path.exists(expected_path), \"dummy file is not", "client_paths from OpenDrive.server_side import paths as server_paths from tests.client_side.helper_client import", "as server_paths from tests.client_side.helper_client import h_register_dummy_user_device_client from tests.helper_all import h_client_routine,", "setUp(self) -> None: h_clear_init_all_folders() self._server_process = h_start_server_process() self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA,", "client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, \"folder1\") h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME = 1 def tearDown(self) -> None:", "unittest from OpenDrive.client_side import file_changes_json as c_json from OpenDrive.client_side import", "as client_paths from OpenDrive.server_side import paths as server_paths from tests.client_side.helper_client", "os import threading import time import unittest from OpenDrive.client_side import", "= h_start_server_process() self.folder1_abs_local_path = 
client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, \"folder1\") h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME = 1", "World\") time.sleep(5) # wait till synchronization finished expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id),", "1 def tearDown(self) -> None: main.shutdown() h_stop_server_process(self._server_process) @h_client_routine(clear_folders=False) def putest_start_logged_in(self):", "TestMain(unittest.TestCase): def setUp(self) -> None: h_clear_init_all_folders() self._server_process = h_start_server_process() self.folder1_abs_local_path", "h_clear_init_all_folders() self._server_process = h_start_server_process() self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, \"folder1\") h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME", "interface from OpenDrive.client_side import main from OpenDrive.client_side import paths as", "paths as server_paths from tests.client_side.helper_client import h_register_dummy_user_device_client from tests.helper_all import", "daemon=True) main_thread.start() time.sleep(2) # wait till changes.json is created interface.add_sync_folder(self.folder1_abs_local_path,", "h_register_dummy_user_device_client() main_thread = threading.Thread(target=main.start, daemon=True) main_thread.start() time.sleep(2) # wait till", "OpenDrive.client_side import file_changes_json as c_json from OpenDrive.client_side import interface from", "till synchronization finished expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), \"folder1/dummy.txt\") self.assertTrue(os.path.exists(expected_path), \"dummy file", "import time import unittest from OpenDrive.client_side import file_changes_json as c_json", "putest_start_logged_in(self): user = h_register_dummy_user_device_client() main_thread = threading.Thread(target=main.start, daemon=True) main_thread.start() time.sleep(2)", "from OpenDrive.client_side import 
interface from OpenDrive.client_side import main from OpenDrive.client_side", "is not pulled to server!\") self.assertEqual(expected_content, c_json.get_all_data()) time.sleep(1) # wait", "import main from OpenDrive.client_side import paths as client_paths from OpenDrive.server_side", "def setUp(self) -> None: h_clear_init_all_folders() self._server_process = h_start_server_process() self.folder1_abs_local_path =", "open(file_path, \"w\") as f: f.write(\"Hello World\") time.sleep(5) # wait till", "import h_register_dummy_user_device_client from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \\ h_clear_init_all_folders,", "import file_changes_json as c_json from OpenDrive.client_side import interface from OpenDrive.client_side", "main.shutdown() h_stop_server_process(self._server_process) @h_client_routine(clear_folders=False) def putest_start_logged_in(self): user = h_register_dummy_user_device_client() main_thread =", "tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \\ h_clear_init_all_folders, h_create_empty class TestMain(unittest.TestCase):", "is created interface.add_sync_folder(self.folder1_abs_local_path, \"folder1\") expected_content = c_json.get_all_data() file_path = os.path.join(self.folder1_abs_local_path,", "OpenDrive.server_side import paths as server_paths from tests.client_side.helper_client import h_register_dummy_user_device_client from", "\"folder1/dummy.txt\") self.assertTrue(os.path.exists(expected_path), \"dummy file is not pulled to server!\") self.assertEqual(expected_content,", "file_changes_json as c_json from OpenDrive.client_side import interface from OpenDrive.client_side import", "self._server_process = h_start_server_process() self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, \"folder1\") h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME =", "file_path = 
os.path.join(self.folder1_abs_local_path, \"dummy.txt\") with open(file_path, \"w\") as f: f.write(\"Hello", "OpenDrive.client_side import interface from OpenDrive.client_side import main from OpenDrive.client_side import", "@h_client_routine(clear_folders=False) def putest_start_logged_in(self): user = h_register_dummy_user_device_client() main_thread = threading.Thread(target=main.start, daemon=True)", "time import unittest from OpenDrive.client_side import file_changes_json as c_json from", "main_thread.start() time.sleep(2) # wait till changes.json is created interface.add_sync_folder(self.folder1_abs_local_path, \"folder1\")", "h_create_empty class TestMain(unittest.TestCase): def setUp(self) -> None: h_clear_init_all_folders() self._server_process =", "created interface.add_sync_folder(self.folder1_abs_local_path, \"folder1\") expected_content = c_json.get_all_data() file_path = os.path.join(self.folder1_abs_local_path, \"dummy.txt\")", "h_stop_server_process(self._server_process) @h_client_routine(clear_folders=False) def putest_start_logged_in(self): user = h_register_dummy_user_device_client() main_thread = threading.Thread(target=main.start,", "expected_content = c_json.get_all_data() file_path = os.path.join(self.folder1_abs_local_path, \"dummy.txt\") with open(file_path, \"w\")", "not pulled to server!\") self.assertEqual(expected_content, c_json.get_all_data()) time.sleep(1) # wait till", "-> None: h_clear_init_all_folders() self._server_process = h_start_server_process() self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, \"folder1\")", "wait till changes.json is created interface.add_sync_folder(self.folder1_abs_local_path, \"folder1\") expected_content = c_json.get_all_data()", "def putest_start_logged_in(self): user = h_register_dummy_user_device_client() main_thread = threading.Thread(target=main.start, daemon=True) main_thread.start()", "expected_path = 
os.path.join(server_paths.get_users_root_folder(user.user_id), \"folder1/dummy.txt\") self.assertTrue(os.path.exists(expected_path), \"dummy file is not pulled", "with open(file_path, \"w\") as f: f.write(\"Hello World\") time.sleep(5) # wait", "import interface from OpenDrive.client_side import main from OpenDrive.client_side import paths", "f.write(\"Hello World\") time.sleep(5) # wait till synchronization finished expected_path =", "threading.Thread(target=main.start, daemon=True) main_thread.start() time.sleep(2) # wait till changes.json is created", "\"folder1\") expected_content = c_json.get_all_data() file_path = os.path.join(self.folder1_abs_local_path, \"dummy.txt\") with open(file_path,", "file is not pulled to server!\") self.assertEqual(expected_content, c_json.get_all_data()) time.sleep(1) #", "synchronization finished expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), \"folder1/dummy.txt\") self.assertTrue(os.path.exists(expected_path), \"dummy file is", "wait till synchronization finished expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), \"folder1/dummy.txt\") self.assertTrue(os.path.exists(expected_path), \"dummy", "from OpenDrive.client_side import paths as client_paths from OpenDrive.server_side import paths", "\"dummy file is not pulled to server!\") self.assertEqual(expected_content, c_json.get_all_data()) time.sleep(1)", "main_thread = threading.Thread(target=main.start, daemon=True) main_thread.start() time.sleep(2) # wait till changes.json", "h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME = 1 def tearDown(self) -> None: main.shutdown() h_stop_server_process(self._server_process)", "h_start_server_process() self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, \"folder1\") h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME = 1 def", "None: main.shutdown() h_stop_server_process(self._server_process) 
@h_client_routine(clear_folders=False) def putest_start_logged_in(self): user = h_register_dummy_user_device_client() main_thread", "= threading.Thread(target=main.start, daemon=True) main_thread.start() time.sleep(2) # wait till changes.json is", "from OpenDrive.server_side import paths as server_paths from tests.client_side.helper_client import h_register_dummy_user_device_client", "h_client_routine, h_start_server_process, h_stop_server_process, \\ h_clear_init_all_folders, h_create_empty class TestMain(unittest.TestCase): def setUp(self)", "import paths as server_paths from tests.client_side.helper_client import h_register_dummy_user_device_client from tests.helper_all", "h_start_server_process, h_stop_server_process, \\ h_clear_init_all_folders, h_create_empty class TestMain(unittest.TestCase): def setUp(self) ->", "# wait till synchronization finished expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), \"folder1/dummy.txt\") self.assertTrue(os.path.exists(expected_path),", "import unittest from OpenDrive.client_side import file_changes_json as c_json from OpenDrive.client_side", "from tests.client_side.helper_client import h_register_dummy_user_device_client from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process,", "from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \\ h_clear_init_all_folders, h_create_empty class", "= c_json.get_all_data() file_path = os.path.join(self.folder1_abs_local_path, \"dummy.txt\") with open(file_path, \"w\") as", "user = h_register_dummy_user_device_client() main_thread = threading.Thread(target=main.start, daemon=True) main_thread.start() time.sleep(2) #", "till changes.json is created interface.add_sync_folder(self.folder1_abs_local_path, \"folder1\") expected_content = c_json.get_all_data() file_path", "changes.json is created interface.add_sync_folder(self.folder1_abs_local_path, \"folder1\") expected_content = 
c_json.get_all_data() file_path =", "import paths as client_paths from OpenDrive.server_side import paths as server_paths", "c_json.get_all_data() file_path = os.path.join(self.folder1_abs_local_path, \"dummy.txt\") with open(file_path, \"w\") as f:", "import os import threading import time import unittest from OpenDrive.client_side", "def tearDown(self) -> None: main.shutdown() h_stop_server_process(self._server_process) @h_client_routine(clear_folders=False) def putest_start_logged_in(self): user", "self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, \"folder1\") h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME = 1 def tearDown(self)", "\\ h_clear_init_all_folders, h_create_empty class TestMain(unittest.TestCase): def setUp(self) -> None: h_clear_init_all_folders()", "= 1 def tearDown(self) -> None: main.shutdown() h_stop_server_process(self._server_process) @h_client_routine(clear_folders=False) def", "class TestMain(unittest.TestCase): def setUp(self) -> None: h_clear_init_all_folders() self._server_process = h_start_server_process()", "<filename>src/tests/client_side/test_main.py import os import threading import time import unittest from", "tests.client_side.helper_client import h_register_dummy_user_device_client from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \\", "None: h_clear_init_all_folders() self._server_process = h_start_server_process() self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, \"folder1\") h_create_empty(self.folder1_abs_local_path)", "= client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, \"folder1\") h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME = 1 def tearDown(self) ->", "h_register_dummy_user_device_client from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \\ h_clear_init_all_folders, h_create_empty", "= 
os.path.join(self.folder1_abs_local_path, \"dummy.txt\") with open(file_path, \"w\") as f: f.write(\"Hello World\")", "time.sleep(5) # wait till synchronization finished expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), \"folder1/dummy.txt\")", "os.path.join(self.folder1_abs_local_path, \"dummy.txt\") with open(file_path, \"w\") as f: f.write(\"Hello World\") time.sleep(5)", "OpenDrive.client_side import main from OpenDrive.client_side import paths as client_paths from", "# wait till changes.json is created interface.add_sync_folder(self.folder1_abs_local_path, \"folder1\") expected_content =", "server_paths from tests.client_side.helper_client import h_register_dummy_user_device_client from tests.helper_all import h_client_routine, h_start_server_process,", "main from OpenDrive.client_side import paths as client_paths from OpenDrive.server_side import", "c_json from OpenDrive.client_side import interface from OpenDrive.client_side import main from", "\"dummy.txt\") with open(file_path, \"w\") as f: f.write(\"Hello World\") time.sleep(5) #", "OpenDrive.client_side import paths as client_paths from OpenDrive.server_side import paths as", "main.MIN_UPDATE_PAUSE_TIME = 1 def tearDown(self) -> None: main.shutdown() h_stop_server_process(self._server_process) @h_client_routine(clear_folders=False)", "paths as client_paths from OpenDrive.server_side import paths as server_paths from", "pulled to server!\") self.assertEqual(expected_content, c_json.get_all_data()) time.sleep(1) # wait till waiting...", "h_stop_server_process, \\ h_clear_init_all_folders, h_create_empty class TestMain(unittest.TestCase): def setUp(self) -> None:", "import threading import time import unittest from OpenDrive.client_side import file_changes_json", "from OpenDrive.client_side import main from OpenDrive.client_side import paths as client_paths", "as c_json from OpenDrive.client_side import interface from OpenDrive.client_side import main", "as f: f.write(\"Hello 
World\") time.sleep(5) # wait till synchronization finished", "= os.path.join(server_paths.get_users_root_folder(user.user_id), \"folder1/dummy.txt\") self.assertTrue(os.path.exists(expected_path), \"dummy file is not pulled to", "f: f.write(\"Hello World\") time.sleep(5) # wait till synchronization finished expected_path", "interface.add_sync_folder(self.folder1_abs_local_path, \"folder1\") expected_content = c_json.get_all_data() file_path = os.path.join(self.folder1_abs_local_path, \"dummy.txt\") with", "-> None: main.shutdown() h_stop_server_process(self._server_process) @h_client_routine(clear_folders=False) def putest_start_logged_in(self): user = h_register_dummy_user_device_client()", "tearDown(self) -> None: main.shutdown() h_stop_server_process(self._server_process) @h_client_routine(clear_folders=False) def putest_start_logged_in(self): user =", "os.path.join(server_paths.get_users_root_folder(user.user_id), \"folder1/dummy.txt\") self.assertTrue(os.path.exists(expected_path), \"dummy file is not pulled to server!\")", "import h_client_routine, h_start_server_process, h_stop_server_process, \\ h_clear_init_all_folders, h_create_empty class TestMain(unittest.TestCase): def", "self.assertTrue(os.path.exists(expected_path), \"dummy file is not pulled to server!\") self.assertEqual(expected_content, c_json.get_all_data())", "\"w\") as f: f.write(\"Hello World\") time.sleep(5) # wait till synchronization" ]
[ "def test_imread_file_url(): # tweak data path so that file URI", "that we can identify the content # by extension image", "import io, data_dir from skimage._shared import testing from skimage._shared.testing import", "URI works on both unix and windows. data_path = data_dir.lstrip(os.path.sep)", "io.imread(httpserver.url + '/test.jpg' + '?' + 's' * 266) assert", "data_path.replace(os.path.sep, '/') image_url = 'file:///{0}/camera.png'.format(data_path) image = io.imread(image_url) assert image.shape", "b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02' b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04' b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t' b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10' b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10' b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11' b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00' b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00'", "data_dir.lstrip(os.path.sep) data_path = data_path.replace(os.path.sep, '/') image_url = 'file:///{0}/camera.png'.format(data_path) image =", "# https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg) # it will serve anything you provide", "(512, 512) def test_imread_http_url(httpserver): # httpserver is a fixture provided", "x) def test_stack_non_array(): with testing.raises(ValueError): io.push([[1, 2, 3]]) def test_imread_file_url():", "skimage._shared import testing from skimage._shared.testing import assert_array_equal one_by_one_jpeg = (", "testing.raises(ValueError): io.push([[1, 2, 3]]) def test_imread_file_url(): # tweak data path", "# httpserver is a fixture provided by pytest-localserver # 
https://bitbucket.org/pytest-dev/pytest-localserver/", "b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11' b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00' b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00' b'\\x00?\\x00*\\x9f\\xff\\xd9' ) def test_stack_basic(): x", "on its url. # we add a /test.jpg so that", "as np from skimage import io, data_dir from skimage._shared import", "assert_array_equal(io.pop(), x) def test_stack_non_array(): with testing.raises(ValueError): io.push([[1, 2, 3]]) def", "os import numpy as np from skimage import io, data_dir", "image = io.imread(image_url) assert image.shape == (512, 512) def test_imread_http_url(httpserver):", "https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg) # it will serve anything you provide to", "io.imread(image_url) assert image.shape == (512, 512) def test_imread_http_url(httpserver): # httpserver", "will serve anything you provide to it on its url.", "file URI works on both unix and windows. 
data_path =", "content # by extension image = io.imread(httpserver.url + '/test.jpg' +", "def test_stack_non_array(): with testing.raises(ValueError): io.push([[1, 2, 3]]) def test_imread_file_url(): #", "is a fixture provided by pytest-localserver # https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg) #", "( b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01' b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02' b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04' b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t' b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10' b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10' b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11' b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00'", "# we add a /test.jpg so that we can identify", "provide to it on its url. # we add a", "a fixture provided by pytest-localserver # https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg) # it", "import numpy as np from skimage import io, data_dir from", "io.push(x) assert_array_equal(io.pop(), x) def test_stack_non_array(): with testing.raises(ValueError): io.push([[1, 2, 3]])", "serve anything you provide to it on its url. #", "b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00' b'\\x00?\\x00*\\x9f\\xff\\xd9' ) def test_stack_basic(): x = np.arange(12).reshape(3, 4)", "httpserver.serve_content(one_by_one_jpeg) # it will serve anything you provide to it", "url. 
# we add a /test.jpg so that we can", "identify the content # by extension image = io.imread(httpserver.url +", "= np.arange(12).reshape(3, 4) io.push(x) assert_array_equal(io.pop(), x) def test_stack_non_array(): with testing.raises(ValueError):", "= data_dir.lstrip(os.path.sep) data_path = data_path.replace(os.path.sep, '/') image_url = 'file:///{0}/camera.png'.format(data_path) image", "+ '?' + 's' * 266) assert image.shape == (1,", "assert image.shape == (512, 512) def test_imread_http_url(httpserver): # httpserver is", "# by extension image = io.imread(httpserver.url + '/test.jpg' + '?'", "b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00' b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00' b'\\x00?\\x00*\\x9f\\xff\\xd9' ) def test_stack_basic(): x =", "testing from skimage._shared.testing import assert_array_equal one_by_one_jpeg = ( b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01' b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02'", "data_path = data_path.replace(os.path.sep, '/') image_url = 'file:///{0}/camera.png'.format(data_path) image = io.imread(image_url)", "b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10' b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11' b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00' b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00' b'\\x00?\\x00*\\x9f\\xff\\xd9' ) def test_stack_basic():", "data_path = data_dir.lstrip(os.path.sep) data_path = data_path.replace(os.path.sep, '/') image_url = 'file:///{0}/camera.png'.format(data_path)", "httpserver is a fixture provided by pytest-localserver # 
https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg)", "so that we can identify the content # by extension", "test_stack_basic(): x = np.arange(12).reshape(3, 4) io.push(x) assert_array_equal(io.pop(), x) def test_stack_non_array():", "test_stack_non_array(): with testing.raises(ValueError): io.push([[1, 2, 3]]) def test_imread_file_url(): # tweak", "<gh_stars>0 import os import numpy as np from skimage import", "both unix and windows. data_path = data_dir.lstrip(os.path.sep) data_path = data_path.replace(os.path.sep,", "data_dir from skimage._shared import testing from skimage._shared.testing import assert_array_equal one_by_one_jpeg", "the content # by extension image = io.imread(httpserver.url + '/test.jpg'", "by pytest-localserver # https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg) # it will serve anything", "def test_imread_http_url(httpserver): # httpserver is a fixture provided by pytest-localserver", "numpy as np from skimage import io, data_dir from skimage._shared", "path so that file URI works on both unix and", "'file:///{0}/camera.png'.format(data_path) image = io.imread(image_url) assert image.shape == (512, 512) def", "/test.jpg so that we can identify the content # by", "import testing from skimage._shared.testing import assert_array_equal one_by_one_jpeg = ( b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01'", "pytest-localserver # https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg) # it will serve anything you", "provided by pytest-localserver # https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg) # it will serve", "a /test.jpg so that we can identify the content #", "= 'file:///{0}/camera.png'.format(data_path) image = io.imread(image_url) assert image.shape == (512, 512)", "skimage import io, data_dir from skimage._shared import testing from 
skimage._shared.testing", "np from skimage import io, data_dir from skimage._shared import testing", "image_url = 'file:///{0}/camera.png'.format(data_path) image = io.imread(image_url) assert image.shape == (512,", "'?' + 's' * 266) assert image.shape == (1, 1)", "from skimage import io, data_dir from skimage._shared import testing from", "and windows. data_path = data_dir.lstrip(os.path.sep) data_path = data_path.replace(os.path.sep, '/') image_url", "= data_path.replace(os.path.sep, '/') image_url = 'file:///{0}/camera.png'.format(data_path) image = io.imread(image_url) assert", "data path so that file URI works on both unix", "= io.imread(image_url) assert image.shape == (512, 512) def test_imread_http_url(httpserver): #", "fixture provided by pytest-localserver # https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg) # it will", "one_by_one_jpeg = ( b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01' b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02' b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04' b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t' b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10' b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10' b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11'", "unix and windows. data_path = data_dir.lstrip(os.path.sep) data_path = data_path.replace(os.path.sep, '/')", "extension image = io.imread(httpserver.url + '/test.jpg' + '?' + 's'", "on both unix and windows. 
data_path = data_dir.lstrip(os.path.sep) data_path =", "import os import numpy as np from skimage import io,", "from skimage._shared.testing import assert_array_equal one_by_one_jpeg = ( b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01' b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02' b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04'", "'/test.jpg' + '?' + 's' * 266) assert image.shape ==", "anything you provide to it on its url. # we", "image = io.imread(httpserver.url + '/test.jpg' + '?' + 's' *", "+ '/test.jpg' + '?' + 's' * 266) assert image.shape", "image.shape == (512, 512) def test_imread_http_url(httpserver): # httpserver is a", "512) def test_imread_http_url(httpserver): # httpserver is a fixture provided by", "b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00' b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00' b'\\x00?\\x00*\\x9f\\xff\\xd9' ) def test_stack_basic(): x = np.arange(12).reshape(3,", "we add a /test.jpg so that we can identify the", "assert_array_equal one_by_one_jpeg = ( b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01' b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02' b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04' b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t' b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10' b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10'", "b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t' b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10' b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10' b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11' b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00' 
b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00' b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00' b'\\x00?\\x00*\\x9f\\xff\\xd9' )", "by extension image = io.imread(httpserver.url + '/test.jpg' + '?' +", "io, data_dir from skimage._shared import testing from skimage._shared.testing import assert_array_equal", "we can identify the content # by extension image =", "def test_stack_basic(): x = np.arange(12).reshape(3, 4) io.push(x) assert_array_equal(io.pop(), x) def", ") def test_stack_basic(): x = np.arange(12).reshape(3, 4) io.push(x) assert_array_equal(io.pop(), x)", "# it will serve anything you provide to it on", "its url. # we add a /test.jpg so that we", "4) io.push(x) assert_array_equal(io.pop(), x) def test_stack_non_array(): with testing.raises(ValueError): io.push([[1, 2,", "b'\\x00?\\x00*\\x9f\\xff\\xd9' ) def test_stack_basic(): x = np.arange(12).reshape(3, 4) io.push(x) assert_array_equal(io.pop(),", "from skimage._shared import testing from skimage._shared.testing import assert_array_equal one_by_one_jpeg =", "test_imread_file_url(): # tweak data path so that file URI works", "tweak data path so that file URI works on both", "works on both unix and windows. data_path = data_dir.lstrip(os.path.sep) data_path", "skimage._shared.testing import assert_array_equal one_by_one_jpeg = ( b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01' b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02' b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04' b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t'", "you provide to it on its url. # we add", "io.push([[1, 2, 3]]) def test_imread_file_url(): # tweak data path so", "windows. data_path = data_dir.lstrip(os.path.sep) data_path = data_path.replace(os.path.sep, '/') image_url =", "that file URI works on both unix and windows. 
data_path", "b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10' b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10' b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11' b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00' b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00' b'\\x00?\\x00*\\x9f\\xff\\xd9' ) def", "to it on its url. # we add a /test.jpg", "can identify the content # by extension image = io.imread(httpserver.url", "b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00' b'\\x00?\\x00*\\x9f\\xff\\xd9' ) def test_stack_basic(): x = np.arange(12).reshape(3, 4) io.push(x)", "so that file URI works on both unix and windows.", "with testing.raises(ValueError): io.push([[1, 2, 3]]) def test_imread_file_url(): # tweak data", "it will serve anything you provide to it on its", "it on its url. 
# we add a /test.jpg so", "add a /test.jpg so that we can identify the content", "3]]) def test_imread_file_url(): # tweak data path so that file", "b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01' b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02' b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04' b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t' b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10' b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10' b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11' b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00' b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'", "np.arange(12).reshape(3, 4) io.push(x) assert_array_equal(io.pop(), x) def test_stack_non_array(): with testing.raises(ValueError): io.push([[1,", "test_imread_http_url(httpserver): # httpserver is a fixture provided by pytest-localserver #", "# tweak data path so that file URI works on", "'/') image_url = 'file:///{0}/camera.png'.format(data_path) image = io.imread(image_url) assert image.shape ==", "import assert_array_equal one_by_one_jpeg = ( b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01' b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02' b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04' b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t' b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10'", "2, 3]]) def test_imread_file_url(): # tweak data path so that", "= ( b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01' b'\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x03\\x02\\x02\\x02\\x02' b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04' b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t' 
b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10' b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10' b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11' b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00'", "b'\\x02\\x03\\x02\\x02\\x02\\x03\\x03\\x03\\x03\\x04\\x06\\x04\\x04' b'\\x04\\x04\\x04\\x08\\x06\\x06\\x05\\x06\\t\\x08\\n\\n\\t\\x08\\t\\t' b'\\n\\x0c\\x0f\\x0c\\n\\x0b\\x0e\\x0b\\t\\t\\r\\x11\\r\\x0e\\x0f\\x10' b'\\x10\\x11\\x10\\n\\x0c\\x12\\x13\\x12\\x10\\x13\\x0f\\x10\\x10' b'\\x10\\xff\\xc0\\x00\\x0b\\x08\\x00\\x01\\x00\\x01\\x01\\x01\\x11' b'\\x00\\xff\\xc4\\x00\\x14\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xff\\xc4\\x00' b'\\x14\\x10\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' b'\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xda\\x00\\x08\\x01\\x01\\x00' b'\\x00?\\x00*\\x9f\\xff\\xd9'", "= io.imread(httpserver.url + '/test.jpg' + '?' + 's' * 266)", "x = np.arange(12).reshape(3, 4) io.push(x) assert_array_equal(io.pop(), x) def test_stack_non_array(): with", "== (512, 512) def test_imread_http_url(httpserver): # httpserver is a fixture" ]
[ "commons.helpers.dataset.context import Context class TestGalaxyProcessor(TestCase): def setUp(self): validation_size = 0.2", "strategies. galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy() # Set the context to galaxy", "strategy. context = Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False, validation_size=np.float32(validation_size))", "# -*- coding: utf-8 -*- import os import numpy as", "validation_size=np.float32(validation_size)) def testGalaxyProcessor(self): # Process galaxies. galaxy_processor = GalaxyProcessor(self.galaxy_images_path) #features", "set loading strategy. context = Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file,", "data set loading strategy. context = Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset =", "# Set the context to galaxy label data set loading", "galaxy label data set loading strategy. context = Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy)", "TestCase from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy from", "\"/data/images/\" # Create instance of data set loading strategies. galaxy_label_data_set_strategy", "def setUp(self): validation_size = 0.2 # Get the ground truth", "script's parameters. 
self.galaxy_csv_file = os.environ[\"VIRTUAL_ENV\"] + \"/data/csv/galaxy/galaxy.csv\" self.galaxy_images_path = os.environ[\"VIRTUAL_ENV\"]", "self.galaxy_images_path = os.environ[\"VIRTUAL_ENV\"] + \"/data/images/\" # Create instance of data", "= GalaxyDataSetLabelStrategy() # Set the context to galaxy label data", "context to galaxy label data set loading strategy. context =", "core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy from commons.helpers.dataset.context import", "testGalaxyProcessor(self): # Process galaxies. galaxy_processor = GalaxyProcessor(self.galaxy_images_path) #features = galaxy_processor.process_galaxy(self.label_dataset)", "CSV file from script's parameters. self.galaxy_csv_file = os.environ[\"VIRTUAL_ENV\"] + \"/data/csv/galaxy/galaxy.csv\"", "class TestGalaxyProcessor(TestCase): def setUp(self): validation_size = 0.2 # Get the", "set loading strategies. galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy() # Set the context", "validation_size = 0.2 # Get the ground truth CSV file", "unittest import TestCase from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import", "numpy as np from unittest import TestCase from core.feature_extraction.galaxy.galaxy_processor import", "Get the ground truth CSV file from script's parameters. self.galaxy_csv_file", "Set the context to galaxy label data set loading strategy.", "TestGalaxyProcessor(TestCase): def setUp(self): validation_size = 0.2 # Get the ground", "the context to galaxy label data set loading strategy. context", "file from script's parameters. 
self.galaxy_csv_file = os.environ[\"VIRTUAL_ENV\"] + \"/data/csv/galaxy/galaxy.csv\" self.galaxy_images_path", "from commons.helpers.dataset.context import Context class TestGalaxyProcessor(TestCase): def setUp(self): validation_size =", "def testGalaxyProcessor(self): # Process galaxies. galaxy_processor = GalaxyProcessor(self.galaxy_images_path) #features =", "= os.environ[\"VIRTUAL_ENV\"] + \"/data/images/\" # Create instance of data set", "context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False, validation_size=np.float32(validation_size)) def testGalaxyProcessor(self): # Process galaxies. galaxy_processor =", "os.environ[\"VIRTUAL_ENV\"] + \"/data/csv/galaxy/galaxy.csv\" self.galaxy_images_path = os.environ[\"VIRTUAL_ENV\"] + \"/data/images/\" # Create", "setUp(self): validation_size = 0.2 # Get the ground truth CSV", "+ \"/data/images/\" # Create instance of data set loading strategies.", "label data set loading strategy. context = Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset", "the ground truth CSV file from script's parameters. self.galaxy_csv_file =", "# Get the ground truth CSV file from script's parameters.", "import GalaxyDataSetLabelStrategy from commons.helpers.dataset.context import Context class TestGalaxyProcessor(TestCase): def setUp(self):", "import TestCase from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy", "os.environ[\"VIRTUAL_ENV\"] + \"/data/images/\" # Create instance of data set loading", "# Create instance of data set loading strategies. galaxy_label_data_set_strategy =", "Create instance of data set loading strategies. 
galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy()", "utf-8 -*- import os import numpy as np from unittest", "os import numpy as np from unittest import TestCase from", "import os import numpy as np from unittest import TestCase", "data set loading strategies. galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy() # Set the", "loading strategies. galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy() # Set the context to", "Context class TestGalaxyProcessor(TestCase): def setUp(self): validation_size = 0.2 # Get", "galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy() # Set the context to galaxy label", "coding: utf-8 -*- import os import numpy as np from", "instance of data set loading strategies. galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy() #", "import GalaxyProcessor from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy from commons.helpers.dataset.context import Context", "parameters. self.galaxy_csv_file = os.environ[\"VIRTUAL_ENV\"] + \"/data/csv/galaxy/galaxy.csv\" self.galaxy_images_path = os.environ[\"VIRTUAL_ENV\"] +", "#!/usr/bin/env python # -*- coding: utf-8 -*- import os import", "np from unittest import TestCase from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor from", "GalaxyDataSetLabelStrategy() # Set the context to galaxy label data set", "-*- coding: utf-8 -*- import os import numpy as np", "import Context class TestGalaxyProcessor(TestCase): def setUp(self): validation_size = 0.2 #", "-*- import os import numpy as np from unittest import", "self.galaxy_csv_file = os.environ[\"VIRTUAL_ENV\"] + \"/data/csv/galaxy/galaxy.csv\" self.galaxy_images_path = os.environ[\"VIRTUAL_ENV\"] + \"/data/images/\"", "truth CSV file from script's parameters. 
self.galaxy_csv_file = os.environ[\"VIRTUAL_ENV\"] +", "= Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False, validation_size=np.float32(validation_size)) def testGalaxyProcessor(self):", "python # -*- coding: utf-8 -*- import os import numpy", "commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy from commons.helpers.dataset.context import Context class TestGalaxyProcessor(TestCase): def", "GalaxyDataSetLabelStrategy from commons.helpers.dataset.context import Context class TestGalaxyProcessor(TestCase): def setUp(self): validation_size", "loading strategy. context = Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False,", "Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False, validation_size=np.float32(validation_size)) def testGalaxyProcessor(self): #", "GalaxyProcessor from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy from commons.helpers.dataset.context import Context class", "as np from unittest import TestCase from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor", "= context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False, validation_size=np.float32(validation_size)) def testGalaxyProcessor(self): # Process galaxies. galaxy_processor", "from unittest import TestCase from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy", "to galaxy label data set loading strategy. 
context = Context(galaxy_label_data_set_strategy)", "context = Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False, validation_size=np.float32(validation_size)) def", "of data set loading strategies. galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy() # Set", "<reponame>EmilioCC/gti770-student-framework #!/usr/bin/env python # -*- coding: utf-8 -*- import os", "from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy from commons.helpers.dataset.context import Context class TestGalaxyProcessor(TestCase):", "ground truth CSV file from script's parameters. self.galaxy_csv_file = os.environ[\"VIRTUAL_ENV\"]", "context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False, validation_size=np.float32(validation_size)) def testGalaxyProcessor(self): # Process", "self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False, validation_size=np.float32(validation_size)) def testGalaxyProcessor(self): # Process galaxies.", "from script's parameters. 
self.galaxy_csv_file = os.environ[\"VIRTUAL_ENV\"] + \"/data/csv/galaxy/galaxy.csv\" self.galaxy_images_path =", "= 0.2 # Get the ground truth CSV file from", "from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy from commons.helpers.dataset.context", "\"/data/csv/galaxy/galaxy.csv\" self.galaxy_images_path = os.environ[\"VIRTUAL_ENV\"] + \"/data/images/\" # Create instance of", "+ \"/data/csv/galaxy/galaxy.csv\" self.galaxy_images_path = os.environ[\"VIRTUAL_ENV\"] + \"/data/images/\" # Create instance", "import numpy as np from unittest import TestCase from core.feature_extraction.galaxy.galaxy_processor", "0.2 # Get the ground truth CSV file from script's", "= os.environ[\"VIRTUAL_ENV\"] + \"/data/csv/galaxy/galaxy.csv\" self.galaxy_images_path = os.environ[\"VIRTUAL_ENV\"] + \"/data/images/\" #", "one_hot=False, validation_size=np.float32(validation_size)) def testGalaxyProcessor(self): # Process galaxies. galaxy_processor = GalaxyProcessor(self.galaxy_images_path)" ]
[ "data\" def handle(self, *args, **options): fixture_path = path.join(path.dirname( path.dirname( path.dirname(", "BaseCommand from os import path class Command(BaseCommand): help = \"Populates", "from os import path class Command(BaseCommand): help = \"Populates data\"", "from django.core.management import call_command from django.core.management.base import BaseCommand from os", "from django.core.management.base import BaseCommand from os import path class Command(BaseCommand):", "handle(self, *args, **options): fixture_path = path.join(path.dirname( path.dirname( path.dirname( path.abspath(__file__) )", "import BaseCommand from os import path class Command(BaseCommand): help =", "path.dirname( path.abspath(__file__) ) ) ), \"fixtures/\") settings.FIXTURE_DIRS = (fixture_path,) call_command(\"loaddata\",", ") ) ), \"fixtures/\") settings.FIXTURE_DIRS = (fixture_path,) call_command(\"loaddata\", \"country\", verbosity=1)", "import settings from django.core.management import call_command from django.core.management.base import BaseCommand", "django.conf import settings from django.core.management import call_command from django.core.management.base import", "path.abspath(__file__) ) ) ), \"fixtures/\") settings.FIXTURE_DIRS = (fixture_path,) call_command(\"loaddata\", \"country\",", "class Command(BaseCommand): help = \"Populates data\" def handle(self, *args, **options):", "from django.conf import settings from django.core.management import call_command from django.core.management.base", "import path class Command(BaseCommand): help = \"Populates data\" def handle(self,", "Command(BaseCommand): help = \"Populates data\" def handle(self, *args, **options): fixture_path", "django.core.management.base import BaseCommand from os import path class Command(BaseCommand): help", "settings from django.core.management import call_command from django.core.management.base import BaseCommand from", "path class Command(BaseCommand): help = \"Populates data\" def handle(self, 
*args,", "**options): fixture_path = path.join(path.dirname( path.dirname( path.dirname( path.abspath(__file__) ) ) ),", "= path.join(path.dirname( path.dirname( path.dirname( path.abspath(__file__) ) ) ), \"fixtures/\") settings.FIXTURE_DIRS", "path.join(path.dirname( path.dirname( path.dirname( path.abspath(__file__) ) ) ), \"fixtures/\") settings.FIXTURE_DIRS =", "help = \"Populates data\" def handle(self, *args, **options): fixture_path =", "\"Populates data\" def handle(self, *args, **options): fixture_path = path.join(path.dirname( path.dirname(", "django.core.management import call_command from django.core.management.base import BaseCommand from os import", "call_command from django.core.management.base import BaseCommand from os import path class", "*args, **options): fixture_path = path.join(path.dirname( path.dirname( path.dirname( path.abspath(__file__) ) )", "path.dirname( path.dirname( path.abspath(__file__) ) ) ), \"fixtures/\") settings.FIXTURE_DIRS = (fixture_path,)", "fixture_path = path.join(path.dirname( path.dirname( path.dirname( path.abspath(__file__) ) ) ), \"fixtures/\")", "import call_command from django.core.management.base import BaseCommand from os import path", "os import path class Command(BaseCommand): help = \"Populates data\" def", "def handle(self, *args, **options): fixture_path = path.join(path.dirname( path.dirname( path.dirname( path.abspath(__file__)", "= \"Populates data\" def handle(self, *args, **options): fixture_path = path.join(path.dirname(" ]
[ "SaversRegistry.handlers[ext] = method return method class loads_as: \"\"\"Decorator to aid", "\" f\"is defined in the current API to handle {extension}", "*extensions): extension_set = set(extensions) self.extensions = extension_set def __call__(self, method):", "gmso Topology.\"\"\" class UnsupportedFileFormatError(Exception): \"\"\"Exception to be raised whenever the", "not in self.handlers: raise UnsupportedFileFormatError( f\"Extension {extension} cannot be processed", "Registry() LoadersRegistry = Registry() class saves_as: \"\"\"Decorator to aid saving.\"\"\"", "= Registry() class saves_as: \"\"\"Decorator to aid saving.\"\"\" def __init__(self,", "as saver for an extension.\"\"\" for ext in self.extensions: SaversRegistry.handlers[ext]", "extension): if extension not in self.handlers: raise UnsupportedFileFormatError( f\"Extension {extension}", "\"\"\"Get the callable associated with extension.\"\"\" self._assert_can_process(extension) return self.handlers[extension] SaversRegistry", "not supported.\"\"\" class Registry: \"\"\"A registry to incorporate a callable", "no utility \" f\"is defined in the current API to", "self.handlers: raise UnsupportedFileFormatError( f\"Extension {extension} cannot be processed as no", "\"\"\"Exception to be raised whenever the file loading or saving", "loading.\"\"\" def __init__(self, *extensions): extension_set = set(extensions) self.extensions = extension_set", "{} def _assert_can_process(self, extension): if extension not in self.handlers: raise", "formats for gmso Topology.\"\"\" class UnsupportedFileFormatError(Exception): \"\"\"Exception to be raised", "self.extensions: SaversRegistry.handlers[ext] = method return method class loads_as: \"\"\"Decorator to", "UnsupportedFileFormatError(Exception): \"\"\"Exception to be raised whenever the file loading or", "{extension} cannot be processed as no utility \" f\"is defined", "current API to handle {extension} files.\" ) def get_callable(self, extension):", "to handle 
formats for gmso Topology.\"\"\" class UnsupportedFileFormatError(Exception): \"\"\"Exception to", "with extension.\"\"\" self._assert_can_process(extension) return self.handlers[extension] SaversRegistry = Registry() LoadersRegistry =", "in self.handlers: raise UnsupportedFileFormatError( f\"Extension {extension} cannot be processed as", "extension.\"\"\" for ext in self.extensions: SaversRegistry.handlers[ext] = method return method", "aid loading.\"\"\" def __init__(self, *extensions): extension_set = set(extensions) self.extensions =", "def _assert_can_process(self, extension): if extension not in self.handlers: raise UnsupportedFileFormatError(", "{extension} files.\" ) def get_callable(self, extension): \"\"\"Get the callable associated", "saving is not supported.\"\"\" class Registry: \"\"\"A registry to incorporate", "for an extension.\"\"\" for ext in self.extensions: SaversRegistry.handlers[ext] = method", "method as loader for an extension.\"\"\" for ext in self.extensions:", "set(extensions) self.extensions = extension_set def __call__(self, method): \"\"\"Register the method", "extension): \"\"\"Get the callable associated with extension.\"\"\" self._assert_can_process(extension) return self.handlers[extension]", "__call__(self, method): \"\"\"Register the method as saver for an extension.\"\"\"", "with a file extension.\"\"\" def __init__(self): self.handlers = {} def", "class saves_as: \"\"\"Decorator to aid saving.\"\"\" def __init__(self, *extensions): extension_set", "whenever the file loading or saving is not supported.\"\"\" class", "\"\"\"Decorator to aid saving.\"\"\" def __init__(self, *extensions): extension_set = set(extensions)", "loader for an extension.\"\"\" for ext in self.extensions: LoadersRegistry.handlers[ext] =", "to be raised whenever the file loading or saving is", "as no utility \" f\"is defined in the current API", "return method class loads_as: \"\"\"Decorator to aid loading.\"\"\" def __init__(self,", "extension.\"\"\" for 
ext in self.extensions: LoadersRegistry.handlers[ext] = method return method", "= extension_set def __call__(self, method): \"\"\"Register the method as loader", "in self.extensions: SaversRegistry.handlers[ext] = method return method class loads_as: \"\"\"Decorator", "loads_as: \"\"\"Decorator to aid loading.\"\"\" def __init__(self, *extensions): extension_set =", "__init__(self, *extensions): extension_set = set(extensions) self.extensions = extension_set def __call__(self,", "registry to incorporate a callable with a file extension.\"\"\" def", "= {} def _assert_can_process(self, extension): if extension not in self.handlers:", "extension_set def __call__(self, method): \"\"\"Register the method as loader for", "method): \"\"\"Register the method as loader for an extension.\"\"\" for", "\"\"\"Decorator to aid loading.\"\"\" def __init__(self, *extensions): extension_set = set(extensions)", "def __call__(self, method): \"\"\"Register the method as loader for an", "__init__(self): self.handlers = {} def _assert_can_process(self, extension): if extension not", "the callable associated with extension.\"\"\" self._assert_can_process(extension) return self.handlers[extension] SaversRegistry =", "= set(extensions) self.extensions = extension_set def __call__(self, method): \"\"\"Register the", "self.extensions = extension_set def __call__(self, method): \"\"\"Register the method as", "UnsupportedFileFormatError( f\"Extension {extension} cannot be processed as no utility \"", "\"\"\"A registry to incorporate a callable with a file extension.\"\"\"", "the method as loader for an extension.\"\"\" for ext in", "for gmso Topology.\"\"\" class UnsupportedFileFormatError(Exception): \"\"\"Exception to be raised whenever", "incorporate a callable with a file extension.\"\"\" def __init__(self): self.handlers", "for ext in self.extensions: SaversRegistry.handlers[ext] = method return method class", "handle {extension} files.\" ) def get_callable(self, extension): \"\"\"Get the 
callable", "an extension.\"\"\" for ext in self.extensions: LoadersRegistry.handlers[ext] = method return", "be raised whenever the file loading or saving is not", "class loads_as: \"\"\"Decorator to aid loading.\"\"\" def __init__(self, *extensions): extension_set", "extension_set = set(extensions) self.extensions = extension_set def __call__(self, method): \"\"\"Register", "be processed as no utility \" f\"is defined in the", "an extension.\"\"\" for ext in self.extensions: SaversRegistry.handlers[ext] = method return", ") def get_callable(self, extension): \"\"\"Get the callable associated with extension.\"\"\"", "defined in the current API to handle {extension} files.\" )", "get_callable(self, extension): \"\"\"Get the callable associated with extension.\"\"\" self._assert_can_process(extension) return", "method class loads_as: \"\"\"Decorator to aid loading.\"\"\" def __init__(self, *extensions):", "file loading or saving is not supported.\"\"\" class Registry: \"\"\"A", "callable with a file extension.\"\"\" def __init__(self): self.handlers = {}", "saver for an extension.\"\"\" for ext in self.extensions: SaversRegistry.handlers[ext] =", "def __call__(self, method): \"\"\"Register the method as saver for an", "or saving is not supported.\"\"\" class Registry: \"\"\"A registry to", "utilities to handle formats for gmso Topology.\"\"\" class UnsupportedFileFormatError(Exception): \"\"\"Exception", "as loader for an extension.\"\"\" for ext in self.extensions: LoadersRegistry.handlers[ext]", "Registry() class saves_as: \"\"\"Decorator to aid saving.\"\"\" def __init__(self, *extensions):", "saves_as: \"\"\"Decorator to aid saving.\"\"\" def __init__(self, *extensions): extension_set =", "raised whenever the file loading or saving is not supported.\"\"\"", "processed as no utility \" f\"is defined in the current", "class UnsupportedFileFormatError(Exception): \"\"\"Exception to be raised whenever the file loading", "= Registry() LoadersRegistry = Registry() 
class saves_as: \"\"\"Decorator to aid", "aid saving.\"\"\" def __init__(self, *extensions): extension_set = set(extensions) self.extensions =", "if extension not in self.handlers: raise UnsupportedFileFormatError( f\"Extension {extension} cannot", "LoadersRegistry = Registry() class saves_as: \"\"\"Decorator to aid saving.\"\"\" def", "supported.\"\"\" class Registry: \"\"\"A registry to incorporate a callable with", "f\"Extension {extension} cannot be processed as no utility \" f\"is", "the current API to handle {extension} files.\" ) def get_callable(self,", "to incorporate a callable with a file extension.\"\"\" def __init__(self):", "self._assert_can_process(extension) return self.handlers[extension] SaversRegistry = Registry() LoadersRegistry = Registry() class", "the file loading or saving is not supported.\"\"\" class Registry:", "extension not in self.handlers: raise UnsupportedFileFormatError( f\"Extension {extension} cannot be", "raise UnsupportedFileFormatError( f\"Extension {extension} cannot be processed as no utility", "to aid saving.\"\"\" def __init__(self, *extensions): extension_set = set(extensions) self.extensions", "SaversRegistry = Registry() LoadersRegistry = Registry() class saves_as: \"\"\"Decorator to", "to handle {extension} files.\" ) def get_callable(self, extension): \"\"\"Get the", "def __init__(self): self.handlers = {} def _assert_can_process(self, extension): if extension", "cannot be processed as no utility \" f\"is defined in", "\"\"\"Registry utilities to handle formats for gmso Topology.\"\"\" class UnsupportedFileFormatError(Exception):", "method as saver for an extension.\"\"\" for ext in self.extensions:", "_assert_can_process(self, extension): if extension not in self.handlers: raise UnsupportedFileFormatError( f\"Extension", "handle formats for gmso Topology.\"\"\" class UnsupportedFileFormatError(Exception): \"\"\"Exception to be", "Registry: \"\"\"A registry to incorporate a callable with a file", "in the current API to 
handle {extension} files.\" ) def", "utility \" f\"is defined in the current API to handle", "API to handle {extension} files.\" ) def get_callable(self, extension): \"\"\"Get", "ext in self.extensions: SaversRegistry.handlers[ext] = method return method class loads_as:", "to aid loading.\"\"\" def __init__(self, *extensions): extension_set = set(extensions) self.extensions", "is not supported.\"\"\" class Registry: \"\"\"A registry to incorporate a", "= extension_set def __call__(self, method): \"\"\"Register the method as saver", "extension_set def __call__(self, method): \"\"\"Register the method as saver for", "method): \"\"\"Register the method as saver for an extension.\"\"\" for", "def __init__(self, *extensions): extension_set = set(extensions) self.extensions = extension_set def", "__call__(self, method): \"\"\"Register the method as loader for an extension.\"\"\"", "for an extension.\"\"\" for ext in self.extensions: LoadersRegistry.handlers[ext] = method", "method return method class loads_as: \"\"\"Decorator to aid loading.\"\"\" def", "self.handlers[extension] SaversRegistry = Registry() LoadersRegistry = Registry() class saves_as: \"\"\"Decorator", "file extension.\"\"\" def __init__(self): self.handlers = {} def _assert_can_process(self, extension):", "a callable with a file extension.\"\"\" def __init__(self): self.handlers =", "extension.\"\"\" def __init__(self): self.handlers = {} def _assert_can_process(self, extension): if", "= method return method class loads_as: \"\"\"Decorator to aid loading.\"\"\"", "\"\"\"Register the method as loader for an extension.\"\"\" for ext", "return self.handlers[extension] SaversRegistry = Registry() LoadersRegistry = Registry() class saves_as:", "extension.\"\"\" self._assert_can_process(extension) return self.handlers[extension] SaversRegistry = Registry() LoadersRegistry = Registry()", "Topology.\"\"\" class UnsupportedFileFormatError(Exception): \"\"\"Exception to be raised whenever the file", "loading or 
saving is not supported.\"\"\" class Registry: \"\"\"A registry", "files.\" ) def get_callable(self, extension): \"\"\"Get the callable associated with", "associated with extension.\"\"\" self._assert_can_process(extension) return self.handlers[extension] SaversRegistry = Registry() LoadersRegistry", "\"\"\"Register the method as saver for an extension.\"\"\" for ext", "the method as saver for an extension.\"\"\" for ext in", "a file extension.\"\"\" def __init__(self): self.handlers = {} def _assert_can_process(self,", "callable associated with extension.\"\"\" self._assert_can_process(extension) return self.handlers[extension] SaversRegistry = Registry()", "self.handlers = {} def _assert_can_process(self, extension): if extension not in", "class Registry: \"\"\"A registry to incorporate a callable with a", "def get_callable(self, extension): \"\"\"Get the callable associated with extension.\"\"\" self._assert_can_process(extension)", "f\"is defined in the current API to handle {extension} files.\"", "saving.\"\"\" def __init__(self, *extensions): extension_set = set(extensions) self.extensions = extension_set" ]
[ "file name: \") dosyaadi = str(dosyaadi + \".txt\") with open(dosyaadi,", "= input(\"Enter file name: \") dosyaadi = str(dosyaadi + \".txt\")", "name: \") dosyaadi = str(dosyaadi + \".txt\") with open(dosyaadi, 'r')", "'r') as file : dosyaicerigi = file.read() silinecek = str(input(\"Enter", "= file.read() silinecek = str(input(\"Enter the text that you wish", "file.read() silinecek = str(input(\"Enter the text that you wish to", "str(input(\"Enter the text that you wish to delete: \")) dosyaicerigi", ": dosyaicerigi = file.read() silinecek = str(input(\"Enter the text that", "silinecek = str(input(\"Enter the text that you wish to delete:", "you wish to delete: \")) dosyaicerigi = dosyaicerigi.replace(silinecek, '') with", "open(dosyaadi, 'r') as file : dosyaicerigi = file.read() silinecek =", "dosyaicerigi = file.read() silinecek = str(input(\"Enter the text that you", "wish to delete: \")) dosyaicerigi = dosyaicerigi.replace(silinecek, '') with open(dosyaadi,", "'w') as file: file.write(dosyaicerigi) file.close() print(\"-\" * 30) print(\"Successfully deleted!\")", "dosyaadi = input(\"Enter file name: \") dosyaadi = str(dosyaadi +", "the text that you wish to delete: \")) dosyaicerigi =", "dosyaadi = str(dosyaadi + \".txt\") with open(dosyaadi, 'r') as file", "as file: file.write(dosyaicerigi) file.close() print(\"-\" * 30) print(\"Successfully deleted!\") print(\"-\"", "+ \".txt\") with open(dosyaadi, 'r') as file : dosyaicerigi =", "text that you wish to delete: \")) dosyaicerigi = dosyaicerigi.replace(silinecek,", "dosyaicerigi = dosyaicerigi.replace(silinecek, '') with open(dosyaadi, 'w') as file: file.write(dosyaicerigi)", "with open(dosyaadi, 'r') as file : dosyaicerigi = file.read() silinecek", "file.write(dosyaicerigi) file.close() print(\"-\" * 30) print(\"Successfully deleted!\") print(\"-\" * 30)", "dosyaicerigi.replace(silinecek, '') with open(dosyaadi, 'w') as file: file.write(dosyaicerigi) file.close() print(\"-\"", "'') with open(dosyaadi, 'w') 
as file: file.write(dosyaicerigi) file.close() print(\"-\" *", "str(dosyaadi + \".txt\") with open(dosyaadi, 'r') as file : dosyaicerigi", "= str(input(\"Enter the text that you wish to delete: \"))", "that you wish to delete: \")) dosyaicerigi = dosyaicerigi.replace(silinecek, '')", "= str(dosyaadi + \".txt\") with open(dosyaadi, 'r') as file :", "= dosyaicerigi.replace(silinecek, '') with open(dosyaadi, 'w') as file: file.write(dosyaicerigi) file.close()", "with open(dosyaadi, 'w') as file: file.write(dosyaicerigi) file.close() print(\"-\" * 30)", "delete: \")) dosyaicerigi = dosyaicerigi.replace(silinecek, '') with open(dosyaadi, 'w') as", "\")) dosyaicerigi = dosyaicerigi.replace(silinecek, '') with open(dosyaadi, 'w') as file:", "input(\"Enter file name: \") dosyaadi = str(dosyaadi + \".txt\") with", "file : dosyaicerigi = file.read() silinecek = str(input(\"Enter the text", "open(dosyaadi, 'w') as file: file.write(dosyaicerigi) file.close() print(\"-\" * 30) print(\"Successfully", "\".txt\") with open(dosyaadi, 'r') as file : dosyaicerigi = file.read()", "\") dosyaadi = str(dosyaadi + \".txt\") with open(dosyaadi, 'r') as", "as file : dosyaicerigi = file.read() silinecek = str(input(\"Enter the", "file: file.write(dosyaicerigi) file.close() print(\"-\" * 30) print(\"Successfully deleted!\") print(\"-\" *", "to delete: \")) dosyaicerigi = dosyaicerigi.replace(silinecek, '') with open(dosyaadi, 'w')" ]
[ "i=2): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Normal(y_true, y_pred,", "**kwargs): super().__init__(*args, **kwargs) self.wait_epoch_warmup = kwargs.get(\"wait_epoch_warmup\") def on_epoch_end(self, epoch, logs=None):", "\"resnet50\": from tensorflow.keras.applications.resnet import ResNet50 as TFModel from tensorflow.keras.applications.resnet import", "include_top=False, weights=\"imagenet\") # Freeze base model # base_model.trainable = istrainable", "def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None): istrainable = not freeze_base_model", "\"resnet101v2\": from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel from tensorflow.keras.applications.resnet_v2 import", "tensorflow.keras.applications.vgg16 import preprocess_input elif backbone == \"efficientnetb0\": from tensorflow.keras.applications.efficientnet import", "backbone if backbone == \"resnet50\": from tensorflow.keras.applications.resnet import ResNet50 as", "######\") metrics += [ BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19 ] # Add", "TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == \"resnet101v2\": from", "ValueError(f\"Unknown backbone: {backbone}\") if ignore_model: model = None else: #", "if single_output_idx is None: # Multi-label print(\"###### Multi-label classification ######\")", "tf.keras.layers.Dense(512, activation='relu', name='fc2')(x) # Outputs outputs = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='predictions')(x)", "all layers but BatchNorm (to not destroy the non-trainable weights)", "logs) else: self.epochs_since_last_save += 1 print(f\"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup -", "elif isinstance(n, float) and 0.0 < n <= 1.0: idx", "from tensorflow.keras.applications.vgg16 import preprocess_input elif backbone == \"efficientnetb0\": from 
tensorflow.keras.applications.efficientnet", "import ResNet50V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone", "idx = 0 if n is not None: if isinstance(n,", "input1 = model.input input2 = tf.keras.layers.Input(shape=(2,), name=\"input_2b\") # Pre-outputs 1x3", "unfreeze idx = 0 if n is not None: if", "i=0): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Pneumonia(y_true, y_pred,", "from tensorflow.keras.applications.efficientnet import preprocess_input else: raise ValueError(f\"Unknown backbone: {backbone}\") if", "base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights=\"imagenet\") # Freeze base model", "if ignore_model: model = None else: # Instantiate base model", "from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == \"vgg16\": from tensorflow.keras.applications.vgg16", "number of layers\") # We unfreeze all layers but BatchNorm", "kwargs super().__init__(*args, **kwargs) def on_epoch_end(self, epoch, logs=None): if epoch >=", "\"efficientnetb7\": from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel from tensorflow.keras.applications.efficientnet import", "(wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})\") else: super().on_epoch_end(epoch, logs) class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping):", "backbone == \"resnet50\": from tensorflow.keras.applications.resnet import ResNet50 as TFModel from", "non-trainable weights) for layer in base_model[-idx:]: if not isinstance(layer, tf.keras.layers.BatchNormalization):", "from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == \"resnet101v2\": from tensorflow.keras.applications.resnet_v2", "return model, preprocess_input def add_tabular_input(model, classes): # Input1 input1 =", "Freeze base model # base_model.trainable = istrainable for layers in", "__init__(self, *args, **kwargs): 
self.minimum_epochs = kwargs.get(\"minimum_epochs\", 0) kwargs.pop('minimum_epochs', None) #", "backbone: {backbone}\") if ignore_model: model = None else: # Instantiate", "import EfficientNetB7 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input else: raise", "y_pred[:, i]) @tf.function def BinaryAccuracy_Covid19(y_true, y_pred, i=2): return tf.keras.metrics.binary_accuracy(y_true[:, i],", "# Freeze base model # base_model.trainable = istrainable for layers", "= tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) # Option B # x = tf.keras.layers.Flatten(name='flatten')(x) #", "base_model.input x = base_model(inputs) # Option A x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)", "as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == \"resnet101v2\":", "Outputs outputs = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='predictions')(x) model = tf.keras.Model(inputs, outputs)", "[ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ] return metrics def get_model(backbone,", "tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == \"vgg16\": from tensorflow.keras.applications.vgg16 import", "freeze_base_model=True, ignore_model=None): istrainable = not freeze_base_model # Select backbone if", "as TFModel from tensorflow.keras.applications.vgg16 import preprocess_input elif backbone == \"efficientnetb0\":", "def BinaryAccuracy_Covid19(y_true, y_pred, i=2): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function", "import preprocess_input elif backbone == \"efficientnetb7\": from tensorflow.keras.applications.efficientnet import EfficientNetB7", "from tensorflow.keras.applications.vgg16 import VGG16 as TFModel from tensorflow.keras.applications.vgg16 import preprocess_input", "tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def 
BinaryAccuracy_Pneumonia(y_true, y_pred, i=1): return", "idx = n print(f\"Unfreezing {len(base_model) - idx} layers\") elif isinstance(n,", "ignore_model=None): istrainable = not freeze_base_model # Select backbone if backbone", "= tf.keras.layers.Input(shape=(2,), name=\"input_2b\") # Pre-outputs 1x3 + 1x3 output1 =", "<= 1.0: idx = int(len(base_model) * n) print(f\"Unfreezing {idx} layers\")", "as tf @tf.function def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0): return tf.keras.metrics.binary_accuracy(y_true[:, i],", "tf.keras.layers.Flatten(name='flatten')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x) # x =", "+= 1 print(f\"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})\")", "i]) @tf.function def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:,", "import preprocess_input elif backbone == \"resnet101v2\": from tensorflow.keras.applications.resnet_v2 import ResNet101V2", "- (epoch + 1)})\") else: super().on_epoch_end(epoch, logs) class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def", "elif backbone == \"resnet101v2\": from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel", "freeze_base_model # Select backbone if backbone == \"resnet50\": from tensorflow.keras.applications.resnet", "classification (cls: '{single_output_idx}') ######\") metrics = [ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(), tf.keras.metrics.Precision(),", "{len(base_model) - idx} layers\") elif isinstance(n, float) and 0.0 <", "CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.wait_epoch_warmup = kwargs.get(\"wait_epoch_warmup\")", "tf.keras.layers.Input(shape=(2,), name=\"input_2b\") # Pre-outputs 1x3 + 1x3 output1 = model.output", "@tf.function def BinaryAccuracy_Covid19(y_true, y_pred, i=2): return tf.keras.metrics.binary_accuracy(y_true[:, i], 
y_pred[:, i])", "metrics += [ BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19 ] # Add normal", "0 if n is not None: if isinstance(n, int): idx", "ValueError(\"Invalid number of layers\") # We unfreeze all layers but", "pre-trained weights base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights=\"imagenet\") # Freeze", "tensorflow.keras.applications.vgg16 import VGG16 as TFModel from tensorflow.keras.applications.vgg16 import preprocess_input elif", "not destroy the non-trainable weights) for layer in base_model[-idx:]: if", "# Select backbone if backbone == \"resnet50\": from tensorflow.keras.applications.resnet import", "(cls: '{single_output_idx}') ######\") metrics = [ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall()", "tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ] return metrics def get_model(backbone, classes=None,", "# Pre-outputs 1x3 + 1x3 output1 = model.output output2 =", "activation=\"sigmoid\", name='predictions')(x) model = tf.keras.Model(inputs, outputs) return model, preprocess_input def", "* n) print(f\"Unfreezing {idx} layers\") else: raise ValueError(\"Invalid number of", "else: super().on_epoch_end(epoch, logs) class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def __init__(self, *args, **kwargs): self.minimum_epochs", "outputs = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='predictions')(x) model = tf.keras.Model(inputs, outputs) return", "layers but BatchNorm (to not destroy the non-trainable weights) for", "x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x) # Outputs outputs = tf.keras.layers.Dense(classes,", "unfreeze all layers but BatchNorm (to not destroy the non-trainable", "tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input 
elif", "model.input input2 = tf.keras.layers.Input(shape=(2,), name=\"input_2b\") # Pre-outputs 1x3 + 1x3", "def BinaryAccuracy_Normal(y_true, y_pred, i=3): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) class", "model = tf.keras.Model(inputs, outputs) return model, preprocess_input def add_tabular_input(model, classes):", "tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Normal(y_true, y_pred, i=3): return", "= base_model(inputs) # Option A x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) # Option", "return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Covid19(y_true, y_pred, i=2):", "if (epoch + 1) >= self.wait_epoch_warmup: super().on_epoch_end(epoch, logs) else: self.epochs_since_last_save", "else: raise ValueError(\"Invalid number of layers\") # We unfreeze all", "to unfreeze idx = 0 if n is not None:", "as TFModel from tensorflow.keras.applications.resnet import preprocess_input elif backbone == \"resnet50v2\":", "= model.input input2 = tf.keras.layers.Input(shape=(2,), name=\"input_2b\") # Pre-outputs 1x3 +", "i], y_pred[:, i]) @tf.function def BinaryAccuracy_Covid19(y_true, y_pred, i=2): return tf.keras.metrics.binary_accuracy(y_true[:,", "CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def __init__(self, *args, **kwargs): self.minimum_epochs = kwargs.get(\"minimum_epochs\", 0) kwargs.pop('minimum_epochs',", "{idx} layers\") else: raise ValueError(\"Invalid number of layers\") # We", "import preprocess_input elif backbone == \"efficientnetb0\": from tensorflow.keras.applications.efficientnet import EfficientNetB0", "# Input1 input1 = model.input input2 = tf.keras.layers.Input(shape=(2,), name=\"input_2b\") #", "name='fc2')(x) # Outputs outputs = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='predictions')(x) model =", "model with pre-trained weights base_model = TFModel(input_shape=(*target_size, 3), 
include_top=False, weights=\"imagenet\")", "== \"resnet101v2\": from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel from tensorflow.keras.applications.resnet_v2", "output2]) output = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='final_predictions')(x) model = tf.keras.Model([input1, input2],", "for layers in base_model.layers: layers.trainable = istrainable # Create a", "BatchNorm (to not destroy the non-trainable weights) for layer in", "self.wait_epoch_warmup: if (epoch + 1) >= self.wait_epoch_warmup: super().on_epoch_end(epoch, logs) else:", "kwargs.get(\"minimum_epochs\", 0) kwargs.pop('minimum_epochs', None) # Problems with EarlyStopping kwargs super().__init__(*args,", "tf.keras.Model(inputs, outputs) return model, preprocess_input def add_tabular_input(model, classes): # Input1", "i], y_pred[:, i]) class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def __init__(self, *args, **kwargs): super().__init__(*args,", "from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input", "tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='final_predictions')(x) model = tf.keras.Model([input1, input2], output) return model", "def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function", "= [tf.keras.losses.BinaryCrossentropy()] return losses def get_metrics(single_output_idx, add_normal=False): metrics = []", "y_pred, i=2): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Normal(y_true,", "as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input elif backbone == \"efficientnetb7\":", "+= [ BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19 ] # Add normal class", "output2 = tf.keras.layers.Dense(classes, activation=\"sigmoid\", 
name='output_tab')(input2) # Outputs x = tf.keras.layers.Concatenate(axis=1)([output1,", "self.epochs_since_last_save += 1 print(f\"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch +", "n) print(f\"Unfreezing {idx} layers\") else: raise ValueError(\"Invalid number of layers\")", "super().on_epoch_end(epoch, logs) def get_losses(): losses = [tf.keras.losses.BinaryCrossentropy()] return losses def", "x = tf.keras.layers.Concatenate(axis=1)([output1, output2]) output = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='final_predictions')(x) model", "from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input", "layers\") else: raise ValueError(\"Invalid number of layers\") # We unfreeze", "# Option A x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) # Option B #", "model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})\") else: super().on_epoch_end(epoch, logs) class", "tensorflow.keras.applications.efficientnet import preprocess_input elif backbone == \"efficientnetb7\": from tensorflow.keras.applications.efficientnet import", "name='predictions')(x) model = tf.keras.Model(inputs, outputs) return model, preprocess_input def add_tabular_input(model,", "is None: # Multi-label print(\"###### Multi-label classification ######\") metrics +=", "inputs = base_model.input x = base_model(inputs) # Option A x", "import ResNet50 as TFModel from tensorflow.keras.applications.resnet import preprocess_input elif backbone", "import tensorflow as tf @tf.function def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0): return", "# Add normal class if add_normal: metrics.append(BinaryAccuracy_Normal) else: print(f\"###### Multi-class", "\"vgg16\": from tensorflow.keras.applications.vgg16 import VGG16 as TFModel from tensorflow.keras.applications.vgg16 import", "3), include_top=False, weights=\"imagenet\") # Freeze base model # 
base_model.trainable =", "# We unfreeze all layers but BatchNorm (to not destroy", "return losses def get_metrics(single_output_idx, add_normal=False): metrics = [] if single_output_idx", "*args, **kwargs): super().__init__(*args, **kwargs) self.wait_epoch_warmup = kwargs.get(\"wait_epoch_warmup\") def on_epoch_end(self, epoch,", "layers to unfreeze idx = 0 if n is not", "tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input else:", "preprocess_input elif backbone == \"efficientnetb0\": from tensorflow.keras.applications.efficientnet import EfficientNetB0 as", "Problems with EarlyStopping kwargs super().__init__(*args, **kwargs) def on_epoch_end(self, epoch, logs=None):", "= tf.keras.layers.Dense(512, activation='relu', name='fc1')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)", "self.wait_epoch_warmup: super().on_epoch_end(epoch, logs) else: self.epochs_since_last_save += 1 print(f\"Skipping save model", "n is not None: if isinstance(n, int): idx = n", "preprocess_input def add_tabular_input(model, classes): # Input1 input1 = model.input input2", "x = tf.keras.layers.Flatten(name='flatten')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x) #", "'{single_output_idx}') ######\") metrics = [ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ]", ">= self.minimum_epochs: super().on_epoch_end(epoch, logs) def get_losses(): losses = [tf.keras.losses.BinaryCrossentropy()] return", "save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})\") else: super().on_epoch_end(epoch, logs)", "[tf.keras.losses.BinaryCrossentropy()] return losses def get_metrics(single_output_idx, add_normal=False): metrics = [] if", "self.minimum_epochs = kwargs.get(\"minimum_epochs\", 0) kwargs.pop('minimum_epochs', None) # Problems with EarlyStopping", "i]) @tf.function def 
BinaryAccuracy_Covid19(y_true, y_pred, i=2): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:,", "= istrainable # Create a new model on top inputs", "EfficientNetB0 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input elif backbone ==", "ResNet101V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone ==", "ignore_model: model = None else: # Instantiate base model with", "= model.layers[1].layers # Select number of layers to unfreeze idx", "elif backbone == \"efficientnetb7\": from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel", "on_epoch_end(self, epoch, logs=None): if epoch >= self.minimum_epochs: super().on_epoch_end(epoch, logs) def", "self.minimum_epochs: super().on_epoch_end(epoch, logs) def get_losses(): losses = [tf.keras.losses.BinaryCrossentropy()] return losses", "TFModel from tensorflow.keras.applications.efficientnet import preprocess_input else: raise ValueError(f\"Unknown backbone: {backbone}\")", "of layers\") # We unfreeze all layers but BatchNorm (to", "epoch >= self.minimum_epochs: super().on_epoch_end(epoch, logs) def get_losses(): losses = [tf.keras.losses.BinaryCrossentropy()]", "base_model = model.layers[1].layers # Select number of layers to unfreeze", "base_model(inputs) # Option A x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) # Option B", "(to not destroy the non-trainable weights) for layer in base_model[-idx:]:", "logs) def get_losses(): losses = [tf.keras.losses.BinaryCrossentropy()] return losses def get_metrics(single_output_idx,", "losses def get_metrics(single_output_idx, add_normal=False): metrics = [] if single_output_idx is", "top inputs = base_model.input x = base_model(inputs) # Option A", "] return metrics def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None): istrainable", "import preprocess_input elif backbone == \"vgg16\": from 
tensorflow.keras.applications.vgg16 import VGG16", "= tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='predictions')(x) model = tf.keras.Model(inputs, outputs) return model,", "else: print(f\"###### Multi-class classification (cls: '{single_output_idx}') ######\") metrics = [", "def get_losses(): losses = [tf.keras.losses.BinaryCrossentropy()] return losses def get_metrics(single_output_idx, add_normal=False):", "= tf.keras.Model(inputs, outputs) return model, preprocess_input def add_tabular_input(model, classes): #", "activation=\"sigmoid\", name='output_tab')(input2) # Outputs x = tf.keras.layers.Concatenate(axis=1)([output1, output2]) output =", "single_output_idx is None: # Multi-label print(\"###### Multi-label classification ######\") metrics", "weights=\"imagenet\") # Freeze base model # base_model.trainable = istrainable for", "x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x) # x = tf.keras.layers.Dense(512, activation='relu',", "def __init__(self, *args, **kwargs): self.minimum_epochs = kwargs.get(\"minimum_epochs\", 0) kwargs.pop('minimum_epochs', None)", "tf.keras.layers.Concatenate(axis=1)([output1, output2]) output = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='final_predictions')(x) model = tf.keras.Model([input1,", "import ResNet101V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone", "@tf.function def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])", "# Option B # x = tf.keras.layers.Flatten(name='flatten')(x) # x =", "tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input elif", "i], y_pred[:, i]) @tf.function def BinaryAccuracy_Normal(y_true, y_pred, i=3): return tf.keras.metrics.binary_accuracy(y_true[:,", "= base_model.input x = base_model(inputs) # Option A x =", "add_normal: 
metrics.append(BinaryAccuracy_Normal) else: print(f\"###### Multi-class classification (cls: '{single_output_idx}') ######\") metrics", "== \"resnet50\": from tensorflow.keras.applications.resnet import ResNet50 as TFModel from tensorflow.keras.applications.resnet", "of layers to unfreeze idx = 0 if n is", "= tf.keras.layers.Dense(512, activation='relu', name='fc2')(x) # Outputs outputs = tf.keras.layers.Dense(classes, activation=\"sigmoid\",", "= int(len(base_model) * n) print(f\"Unfreezing {idx} layers\") else: raise ValueError(\"Invalid", "= tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='output_tab')(input2) # Outputs x = tf.keras.layers.Concatenate(axis=1)([output1, output2])", "import VGG16 as TFModel from tensorflow.keras.applications.vgg16 import preprocess_input elif backbone", "from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input", "isinstance(n, float) and 0.0 < n <= 1.0: idx =", "preprocess_input elif backbone == \"efficientnetb7\": from tensorflow.keras.applications.efficientnet import EfficientNetB7 as", "layer in base_model[-idx:]: if not isinstance(layer, tf.keras.layers.BatchNormalization): layer.trainable = True", "TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == \"vgg16\": from", "backbone == \"resnet101v2\": from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel from", "if epoch >= self.minimum_epochs: super().on_epoch_end(epoch, logs) def get_losses(): losses =", "# base_model.trainable = istrainable for layers in base_model.layers: layers.trainable =", "0) kwargs.pop('minimum_epochs', None) # Problems with EarlyStopping kwargs super().__init__(*args, **kwargs)", "idx = int(len(base_model) * n) print(f\"Unfreezing {idx} layers\") else: raise", "elif backbone == \"resnet50v2\": from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel", "self.wait_epoch_warmup = 
kwargs.get(\"wait_epoch_warmup\") def on_epoch_end(self, epoch, logs=None): if self.wait_epoch_warmup: if", "elif backbone == \"vgg16\": from tensorflow.keras.applications.vgg16 import VGG16 as TFModel", "0.0 < n <= 1.0: idx = int(len(base_model) * n)", "i]) class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.wait_epoch_warmup", "metrics = [ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ] return metrics", "{backbone}\") if ignore_model: model = None else: # Instantiate base", "with pre-trained weights base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights=\"imagenet\") #", "output) return model def unfreeze_base_model(model, n=None, unfreeze=True): base_model = model.layers[1].layers", "1)})\") else: super().on_epoch_end(epoch, logs) class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def __init__(self, *args, **kwargs):", "ResNet50V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone ==", "backbone == \"resnet50v2\": from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel from", "add_normal=False): metrics = [] if single_output_idx is None: # Multi-label", "model = tf.keras.Model([input1, input2], output) return model def unfreeze_base_model(model, n=None,", "# Instantiate base model with pre-trained weights base_model = TFModel(input_shape=(*target_size,", "from tensorflow.keras.applications.resnet import preprocess_input elif backbone == \"resnet50v2\": from tensorflow.keras.applications.resnet_v2", "n=None, unfreeze=True): base_model = model.layers[1].layers # Select number of layers", "else: self.epochs_since_last_save += 1 print(f\"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch", "y_pred[:, i]) @tf.function def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1): return 
tf.keras.metrics.binary_accuracy(y_true[:, i],", "output = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='final_predictions')(x) model = tf.keras.Model([input1, input2], output)", "if add_normal: metrics.append(BinaryAccuracy_Normal) else: print(f\"###### Multi-class classification (cls: '{single_output_idx}') ######\")", "< n <= 1.0: idx = int(len(base_model) * n) print(f\"Unfreezing", "+ 1) >= self.wait_epoch_warmup: super().on_epoch_end(epoch, logs) else: self.epochs_since_last_save += 1", "VGG16 as TFModel from tensorflow.keras.applications.vgg16 import preprocess_input elif backbone ==", "from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input", "istrainable # Create a new model on top inputs =", "x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) # Option B # x = tf.keras.layers.Flatten(name='flatten')(x)", "= tf.keras.Model([input1, input2], output) return model def unfreeze_base_model(model, n=None, unfreeze=True):", "[] if single_output_idx is None: # Multi-label print(\"###### Multi-label classification", "= [ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ] return metrics def", "= tf.keras.layers.Concatenate(axis=1)([output1, output2]) output = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='final_predictions')(x) model =", "for layer in base_model[-idx:]: if not isinstance(layer, tf.keras.layers.BatchNormalization): layer.trainable =", "] # Add normal class if add_normal: metrics.append(BinaryAccuracy_Normal) else: print(f\"######", "weights base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights=\"imagenet\") # Freeze base", "@tf.function def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])", "preprocess_input elif backbone == \"vgg16\": from 
tensorflow.keras.applications.vgg16 import VGG16 as", "y_pred, i=0): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Pneumonia(y_true,", "None: if isinstance(n, int): idx = n print(f\"Unfreezing {len(base_model) -", "activation='relu', name='fc1')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x) # Outputs", "number of layers to unfreeze idx = 0 if n", "activation=\"sigmoid\", name='final_predictions')(x) model = tf.keras.Model([input1, input2], output) return model def", "metrics = [] if single_output_idx is None: # Multi-label print(\"######", "TFModel from tensorflow.keras.applications.efficientnet import preprocess_input elif backbone == \"efficientnetb7\": from", "outputs) return model, preprocess_input def add_tabular_input(model, classes): # Input1 input1", "def unfreeze_base_model(model, n=None, unfreeze=True): base_model = model.layers[1].layers # Select number", "BinaryAccuracy_Pneumonia(y_true, y_pred, i=1): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def", "print(f\"Unfreezing {idx} layers\") else: raise ValueError(\"Invalid number of layers\") #", "class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def __init__(self, *args, **kwargs): self.minimum_epochs = kwargs.get(\"minimum_epochs\", 0)", "def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function", "= tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='final_predictions')(x) model = tf.keras.Model([input1, input2], output) return", "= 0 if n is not None: if isinstance(n, int):", "tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='output_tab')(input2) # Outputs x = tf.keras.layers.Concatenate(axis=1)([output1, output2]) output", "== \"resnet50v2\": from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel from tensorflow.keras.applications.resnet_v2", "Instantiate base model 
with pre-trained weights base_model = TFModel(input_shape=(*target_size, 3),", "super().on_epoch_end(epoch, logs) else: self.epochs_since_last_save += 1 print(f\"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup", "EarlyStopping kwargs super().__init__(*args, **kwargs) def on_epoch_end(self, epoch, logs=None): if epoch", "[ BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19 ] # Add normal class if", "backbone == \"efficientnetb0\": from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel from", "return model def unfreeze_base_model(model, n=None, unfreeze=True): base_model = model.layers[1].layers #", "import EfficientNetB0 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input elif backbone", "Option B # x = tf.keras.layers.Flatten(name='flatten')(x) # x = tf.keras.layers.Dense(512,", "kwargs.pop('minimum_epochs', None) # Problems with EarlyStopping kwargs super().__init__(*args, **kwargs) def", "print(\"###### Multi-label classification ######\") metrics += [ BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19", "name='output_tab')(input2) # Outputs x = tf.keras.layers.Concatenate(axis=1)([output1, output2]) output = tf.keras.layers.Dense(classes,", "from tensorflow.keras.applications.resnet import ResNet50 as TFModel from tensorflow.keras.applications.resnet import preprocess_input", "model.output output2 = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='output_tab')(input2) # Outputs x =", "Add normal class if add_normal: metrics.append(BinaryAccuracy_Normal) else: print(f\"###### Multi-class classification", "normal class if add_normal: metrics.append(BinaryAccuracy_Normal) else: print(f\"###### Multi-class classification (cls:", "base_model.layers: layers.trainable = istrainable # Create a new model on", "logs) class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def __init__(self, *args, **kwargs): self.minimum_epochs = 
kwargs.get(\"minimum_epochs\",", "i]) @tf.function def BinaryAccuracy_Normal(y_true, y_pred, i=3): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:,", "is not None: if isinstance(n, int): idx = n print(f\"Unfreezing", "not freeze_base_model # Select backbone if backbone == \"resnet50\": from", "i=3): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def __init__(self,", "= istrainable for layers in base_model.layers: layers.trainable = istrainable #", "1.0: idx = int(len(base_model) * n) print(f\"Unfreezing {idx} layers\") else:", "add_tabular_input(model, classes): # Input1 input1 = model.input input2 = tf.keras.layers.Input(shape=(2,),", "not None: if isinstance(n, int): idx = n print(f\"Unfreezing {len(base_model)", "Select number of layers to unfreeze idx = 0 if", "= kwargs.get(\"wait_epoch_warmup\") def on_epoch_end(self, epoch, logs=None): if self.wait_epoch_warmup: if (epoch", "a new model on top inputs = base_model.input x =", "from tensorflow.keras.applications.efficientnet import preprocess_input elif backbone == \"efficientnetb7\": from tensorflow.keras.applications.efficientnet", "as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input else: raise ValueError(f\"Unknown backbone:", "tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ] return metrics def get_model(backbone, classes=None, target_size=None,", "None else: # Instantiate base model with pre-trained weights base_model", "*args, **kwargs): self.minimum_epochs = kwargs.get(\"minimum_epochs\", 0) kwargs.pop('minimum_epochs', None) # Problems", "get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None): istrainable = not freeze_base_model #", "raise ValueError(\"Invalid number of layers\") # We unfreeze all layers", "name='final_predictions')(x) model = tf.keras.Model([input1, input2], output) return model def 
unfreeze_base_model(model,", "B # x = tf.keras.layers.Flatten(name='flatten')(x) # x = tf.keras.layers.Dense(512, activation='relu',", "losses = [tf.keras.losses.BinaryCrossentropy()] return losses def get_metrics(single_output_idx, add_normal=False): metrics =", "tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Covid19(y_true, y_pred, i=2): return", "# Problems with EarlyStopping kwargs super().__init__(*args, **kwargs) def on_epoch_end(self, epoch,", "super().__init__(*args, **kwargs) self.wait_epoch_warmup = kwargs.get(\"wait_epoch_warmup\") def on_epoch_end(self, epoch, logs=None): if", "print(f\"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})\") else: super().on_epoch_end(epoch,", "Select backbone if backbone == \"resnet50\": from tensorflow.keras.applications.resnet import ResNet50", "super().__init__(*args, **kwargs) def on_epoch_end(self, epoch, logs=None): if epoch >= self.minimum_epochs:", "x = base_model(inputs) # Option A x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) #", "Multi-label classification ######\") metrics += [ BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19 ]", "Create a new model on top inputs = base_model.input x", "as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == \"vgg16\":", "tf.keras.Model([input1, input2], output) return model def unfreeze_base_model(model, n=None, unfreeze=True): base_model", "kwargs.get(\"wait_epoch_warmup\") def on_epoch_end(self, epoch, logs=None): if self.wait_epoch_warmup: if (epoch +", "# Outputs outputs = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='predictions')(x) model = tf.keras.Model(inputs,", "class if add_normal: metrics.append(BinaryAccuracy_Normal) else: print(f\"###### Multi-class classification (cls: '{single_output_idx}')", "Outputs x = tf.keras.layers.Concatenate(axis=1)([output1, output2]) output = 
tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='final_predictions')(x)", "tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def __init__(self, *args, **kwargs):", "BinaryAccuracy_Covid19(y_true, y_pred, i=2): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def", "tf.keras.metrics.Recall() ] return metrics def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None):", "model, preprocess_input def add_tabular_input(model, classes): # Input1 input1 = model.input", "raise ValueError(f\"Unknown backbone: {backbone}\") if ignore_model: model = None else:", "= not freeze_base_model # Select backbone if backbone == \"resnet50\":", "tensorflow.keras.applications.resnet import preprocess_input elif backbone == \"resnet50v2\": from tensorflow.keras.applications.resnet_v2 import", "int(len(base_model) * n) print(f\"Unfreezing {idx} layers\") else: raise ValueError(\"Invalid number", "tf.keras.layers.Dense(512, activation='relu', name='fc1')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x) #", ">= self.wait_epoch_warmup: super().on_epoch_end(epoch, logs) else: self.epochs_since_last_save += 1 print(f\"Skipping save", "import preprocess_input elif backbone == \"resnet50v2\": from tensorflow.keras.applications.resnet_v2 import ResNet50V2", "name=\"input_2b\") # Pre-outputs 1x3 + 1x3 output1 = model.output output2", "= model.output output2 = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='output_tab')(input2) # Outputs x", "def add_tabular_input(model, classes): # Input1 input1 = model.input input2 =", "new model on top inputs = base_model.input x = base_model(inputs)", "model on top inputs = base_model.input x = base_model(inputs) #", "BinaryAccuracy_Covid19 ] # Add normal class if add_normal: metrics.append(BinaryAccuracy_Normal) else:", 
"tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) # Option B # x = tf.keras.layers.Flatten(name='flatten')(x) # x", "if isinstance(n, int): idx = n print(f\"Unfreezing {len(base_model) - idx}", "layers\") # We unfreeze all layers but BatchNorm (to not", "target_size=None, freeze_base_model=True, ignore_model=None): istrainable = not freeze_base_model # Select backbone", "with EarlyStopping kwargs super().__init__(*args, **kwargs) def on_epoch_end(self, epoch, logs=None): if", "def on_epoch_end(self, epoch, logs=None): if epoch >= self.minimum_epochs: super().on_epoch_end(epoch, logs)", "TFModel from tensorflow.keras.applications.vgg16 import preprocess_input elif backbone == \"efficientnetb0\": from", "if n is not None: if isinstance(n, int): idx =", "def get_metrics(single_output_idx, add_normal=False): metrics = [] if single_output_idx is None:", "= [] if single_output_idx is None: # Multi-label print(\"###### Multi-label", "1 print(f\"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})\") else:", "Option A x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) # Option B # x", "the non-trainable weights) for layer in base_model[-idx:]: if not isinstance(layer,", "base model # base_model.trainable = istrainable for layers in base_model.layers:", "__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.wait_epoch_warmup = kwargs.get(\"wait_epoch_warmup\") def on_epoch_end(self,", "else: raise ValueError(f\"Unknown backbone: {backbone}\") if ignore_model: model = None", "return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Normal(y_true, y_pred, i=3):", "i=1): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Covid19(y_true, y_pred,", "in base_model.layers: layers.trainable = istrainable # Create a new model", "i], y_pred[:, i]) @tf.function def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1): return 
tf.keras.metrics.binary_accuracy(y_true[:,", "# Outputs x = tf.keras.layers.Concatenate(axis=1)([output1, output2]) output = tf.keras.layers.Dense(classes, activation=\"sigmoid\",", "We unfreeze all layers but BatchNorm (to not destroy the", "istrainable for layers in base_model.layers: layers.trainable = istrainable # Create", "y_pred[:, i]) class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)", "None) # Problems with EarlyStopping kwargs super().__init__(*args, **kwargs) def on_epoch_end(self,", "tensorflow.keras.applications.efficientnet import preprocess_input else: raise ValueError(f\"Unknown backbone: {backbone}\") if ignore_model:", "and 0.0 < n <= 1.0: idx = int(len(base_model) *", "######\") metrics = [ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ] return", "# Select number of layers to unfreeze idx = 0", "weights) for layer in base_model[-idx:]: if not isinstance(layer, tf.keras.layers.BatchNormalization): layer.trainable", "unfreeze_base_model(model, n=None, unfreeze=True): base_model = model.layers[1].layers # Select number of", "preprocess_input elif backbone == \"resnet50v2\": from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as", "super().on_epoch_end(epoch, logs) class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def __init__(self, *args, **kwargs): self.minimum_epochs =", "isinstance(n, int): idx = n print(f\"Unfreezing {len(base_model) - idx} layers\")", "preprocess_input elif backbone == \"resnet101v2\": from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as", "if self.wait_epoch_warmup: if (epoch + 1) >= self.wait_epoch_warmup: super().on_epoch_end(epoch, logs)", "= None else: # Instantiate base model with pre-trained weights", "return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1):", 
"tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == \"resnet101v2\": from tensorflow.keras.applications.resnet_v2 import", "tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ] return metrics def get_model(backbone, classes=None, target_size=None, freeze_base_model=True,", "class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.wait_epoch_warmup =", "on top inputs = base_model.input x = base_model(inputs) # Option", "**kwargs) def on_epoch_end(self, epoch, logs=None): if epoch >= self.minimum_epochs: super().on_epoch_end(epoch,", "# Multi-label print(\"###### Multi-label classification ######\") metrics += [ BinaryAccuracy_Infiltrates,", "+ 1x3 output1 = model.output output2 = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='output_tab')(input2)", "# x = tf.keras.layers.Flatten(name='flatten')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)", "if backbone == \"resnet50\": from tensorflow.keras.applications.resnet import ResNet50 as TFModel", "int): idx = n print(f\"Unfreezing {len(base_model) - idx} layers\") elif", "**kwargs) self.wait_epoch_warmup = kwargs.get(\"wait_epoch_warmup\") def on_epoch_end(self, epoch, logs=None): if self.wait_epoch_warmup:", "EfficientNetB7 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input else: raise ValueError(f\"Unknown", "y_pred, i=3): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def", "on_epoch_end(self, epoch, logs=None): if self.wait_epoch_warmup: if (epoch + 1) >=", "tensorflow.keras.applications.resnet import ResNet50 as TFModel from tensorflow.keras.applications.resnet import preprocess_input elif", "TFModel from tensorflow.keras.applications.resnet import preprocess_input elif backbone == \"resnet50v2\": from", "# x = tf.keras.layers.Dense(512, 
activation='relu', name='fc2')(x) # Outputs outputs =", "input2 = tf.keras.layers.Input(shape=(2,), name=\"input_2b\") # Pre-outputs 1x3 + 1x3 output1", "== \"vgg16\": from tensorflow.keras.applications.vgg16 import VGG16 as TFModel from tensorflow.keras.applications.vgg16", "model.layers[1].layers # Select number of layers to unfreeze idx =", "BinaryAccuracy_Normal(y_true, y_pred, i=3): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):", "unfreeze=True): base_model = model.layers[1].layers # Select number of layers to", "metrics def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None): istrainable = not", "@tf.function def BinaryAccuracy_Normal(y_true, y_pred, i=3): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])", "tensorflow as tf @tf.function def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0): return tf.keras.metrics.binary_accuracy(y_true[:,", "\"efficientnetb0\": from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel from tensorflow.keras.applications.efficientnet import", "ResNet50 as TFModel from tensorflow.keras.applications.resnet import preprocess_input elif backbone ==", "== \"efficientnetb0\": from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel from tensorflow.keras.applications.efficientnet", "A x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) # Option B # x =", "layers\") elif isinstance(n, float) and 0.0 < n <= 1.0:", "base model with pre-trained weights base_model = TFModel(input_shape=(*target_size, 3), include_top=False,", "**kwargs): self.minimum_epochs = kwargs.get(\"minimum_epochs\", 0) kwargs.pop('minimum_epochs', None) # Problems with", "# Create a new model on top inputs = base_model.input", "y_pred, i=1): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Covid19(y_true,", "idx} 
layers\") elif isinstance(n, float) and 0.0 < n <=", "tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='predictions')(x) model = tf.keras.Model(inputs, outputs) return model, preprocess_input", "1x3 + 1x3 output1 = model.output output2 = tf.keras.layers.Dense(classes, activation=\"sigmoid\",", "(epoch + 1)})\") else: super().on_epoch_end(epoch, logs) class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def __init__(self,", "\"resnet50v2\": from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel from tensorflow.keras.applications.resnet_v2 import", "layers in base_model.layers: layers.trainable = istrainable # Create a new", "= n print(f\"Unfreezing {len(base_model) - idx} layers\") elif isinstance(n, float)", "get_losses(): losses = [tf.keras.losses.BinaryCrossentropy()] return losses def get_metrics(single_output_idx, add_normal=False): metrics", "tf @tf.function def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:,", "model # base_model.trainable = istrainable for layers in base_model.layers: layers.trainable", "float) and 0.0 < n <= 1.0: idx = int(len(base_model)", "classes=None, target_size=None, freeze_base_model=True, ignore_model=None): istrainable = not freeze_base_model # Select", "backbone == \"efficientnetb7\": from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel from", "== \"efficientnetb7\": from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel from tensorflow.keras.applications.efficientnet", "layers.trainable = istrainable # Create a new model on top", "n <= 1.0: idx = int(len(base_model) * n) print(f\"Unfreezing {idx}", "Input1 input1 = model.input input2 = tf.keras.layers.Input(shape=(2,), name=\"input_2b\") # Pre-outputs", "Pre-outputs 1x3 + 1x3 output1 = model.output output2 = tf.keras.layers.Dense(classes,", "istrainable = not freeze_base_model # Select backbone if backbone ==", "= 
TFModel(input_shape=(*target_size, 3), include_top=False, weights=\"imagenet\") # Freeze base model #", "1x3 output1 = model.output output2 = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='output_tab')(input2) #", "model def unfreeze_base_model(model, n=None, unfreeze=True): base_model = model.layers[1].layers # Select", "classification ######\") metrics += [ BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19 ] #", "preprocess_input else: raise ValueError(f\"Unknown backbone: {backbone}\") if ignore_model: model =", "return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def __init__(self, *args,", "print(f\"###### Multi-class classification (cls: '{single_output_idx}') ######\") metrics = [ tf.keras.metrics.BinaryAccuracy(),", "- idx} layers\") elif isinstance(n, float) and 0.0 < n", "1) >= self.wait_epoch_warmup: super().on_epoch_end(epoch, logs) else: self.epochs_since_last_save += 1 print(f\"Skipping", "activation='relu', name='fc2')(x) # Outputs outputs = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='predictions')(x) model", "output1 = model.output output2 = tf.keras.layers.Dense(classes, activation=\"sigmoid\", name='output_tab')(input2) # Outputs", "name='fc1')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x) # Outputs outputs", "epoch, logs=None): if epoch >= self.minimum_epochs: super().on_epoch_end(epoch, logs) def get_losses():", "y_pred[:, i]) @tf.function def BinaryAccuracy_Normal(y_true, y_pred, i=3): return tf.keras.metrics.binary_accuracy(y_true[:, i],", "(epoch + 1) >= self.wait_epoch_warmup: super().on_epoch_end(epoch, logs) else: self.epochs_since_last_save +=", "BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19 ] # Add normal class if add_normal:", "metrics.append(BinaryAccuracy_Normal) else: print(f\"###### Multi-class classification (cls: '{single_output_idx}') 
######\") metrics =", "but BatchNorm (to not destroy the non-trainable weights) for layer", "n print(f\"Unfreezing {len(base_model) - idx} layers\") elif isinstance(n, float) and", "Multi-class classification (cls: '{single_output_idx}') ######\") metrics = [ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(),", "model = None else: # Instantiate base model with pre-trained", "logs=None): if epoch >= self.minimum_epochs: super().on_epoch_end(epoch, logs) def get_losses(): losses", "Multi-label print(\"###### Multi-label classification ######\") metrics += [ BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia,", "backbone == \"vgg16\": from tensorflow.keras.applications.vgg16 import VGG16 as TFModel from", "+ 1)})\") else: super().on_epoch_end(epoch, logs) class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def __init__(self, *args,", "destroy the non-trainable weights) for layer in base_model[-idx:]: if not", "TFModel(input_shape=(*target_size, 3), include_top=False, weights=\"imagenet\") # Freeze base model # base_model.trainable", "logs=None): if self.wait_epoch_warmup: if (epoch + 1) >= self.wait_epoch_warmup: super().on_epoch_end(epoch,", "else: # Instantiate base model with pre-trained weights base_model =", "BinaryAccuracy_Infiltrates(y_true, y_pred, i=0): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def", "print(f\"Unfreezing {len(base_model) - idx} layers\") elif isinstance(n, float) and 0.0", "input2], output) return model def unfreeze_base_model(model, n=None, unfreeze=True): base_model =", "def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.wait_epoch_warmup = kwargs.get(\"wait_epoch_warmup\") def", "None: # Multi-label print(\"###### Multi-label classification ######\") metrics += [", "base_model.trainable = istrainable for layers in base_model.layers: layers.trainable = istrainable", "= kwargs.get(\"minimum_epochs\", 0) kwargs.pop('minimum_epochs', None) # Problems with 
EarlyStopping kwargs", "elif backbone == \"efficientnetb0\": from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel", "BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19 ] # Add normal class if add_normal: metrics.append(BinaryAccuracy_Normal)", "get_metrics(single_output_idx, add_normal=False): metrics = [] if single_output_idx is None: #", "return metrics def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None): istrainable =", "import preprocess_input else: raise ValueError(f\"Unknown backbone: {backbone}\") if ignore_model: model", "# x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x) # x = tf.keras.layers.Dense(512,", "def on_epoch_end(self, epoch, logs=None): if self.wait_epoch_warmup: if (epoch + 1)", "epoch, logs=None): if self.wait_epoch_warmup: if (epoch + 1) >= self.wait_epoch_warmup:", "tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif", "= tf.keras.layers.Flatten(name='flatten')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x) # x", "classes): # Input1 input1 = model.input input2 = tf.keras.layers.Input(shape=(2,), name=\"input_2b\")" ]
[ "magic class TwitterMediaDL: http = httplib2.Http(\".cache\") baseUrl = \"https://twitter.com\" consumer_key", "in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", res['items_html']): ids.append(int(tweet[1])) max_id = int(res['max_id']) return list(set(ids)) def", "access_token_secret = \"\" t = Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret)) remaining", "+ ':orig') return lst def check_limit(self): r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining", "time.time() print(\"Please wait... {:f}\".format(reset), file=sys.stderr) time.sleep(reset + 10) @staticmethod def", "wait... {:f}\".format(reset), file=sys.stderr) time.sleep(reset + 10) @staticmethod def get_file_extension(binary): mime", "time import sys import httplib2 from twitter import * import", "+ '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() res = json.loads(res_raw) if", "nickname).decode()): ids.append(int(tweet[1])) max_id = ids[len(ids) - 1] while 1: res_raw", "print(res_raw) time.sleep(5) res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % (", "or self.remaining <= 1: self.check_limit() r = self.t.statuses.show(_id=tweet_id, _method='GET') self.remaining", "= json.loads(res_raw) except: print(res_raw) time.sleep(5) res_raw = self.http_wrapper( self.baseUrl +", "access_token_key = \"\" access_token_secret = \"\" t = Twitter(auth=OAuth(access_token_key, access_token_secret,", "None def http_wrapper(self, uri): resp, content = self.http.request( uri=uri, method='GET'", "class TwitterMediaDL: http = httplib2.Http(\".cache\") baseUrl = \"https://twitter.com\" consumer_key =", "import json import time import sys import httplib2 from twitter", "for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", 
self.http_wrapper(self.baseUrl + '/%s/media' % nickname).decode()): ids.append(int(tweet[1]))", "def check_limit(self): r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining = r['remaining'] print(\"API Limit", "__name__ == '__main__': for i in range(1, len(sys.argv)): tw =", "= [] if self.remaining is None or self.remaining % 10", "consumer_secret)) remaining = None def http_wrapper(self, uri): resp, content =", "is 0 or self.remaining <= 1: self.check_limit() r = self.t.statuses.show(_id=tweet_id,", "r['limit']), file=sys.stderr) if r['remaining'] / r['limit'] < 0.10: reset =", "return mime.split('/')[1] @staticmethod def get_unix_epoch(created_at): return int(time.mktime(time.strptime(created_at, \"%a %b %d", "self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining = r['remaining'] print(\"API Limit : {:d} / {:d}", "tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", self.http_wrapper(self.baseUrl + '/%s/media' % nickname).decode()): ids.append(int(tweet[1])) max_id", "/ r['limit'] < 0.10: reset = r['reset'] - time.time() print(\"Please", "time.sleep(5) res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname,", "nickname): ids = [] for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", self.http_wrapper(self.baseUrl +", "get_file_extension(binary): mime = magic.from_buffer(binary, True).decode() return mime.split('/')[1] @staticmethod def get_unix_epoch(created_at):", "httplib2 from twitter import * import magic class TwitterMediaDL: http", "= \"\" t = Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret)) remaining =", "= self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() res", "def 
get_unix_epoch(created_at): return int(time.mktime(time.strptime(created_at, \"%a %b %d %H:%M:%S +0000 %Y\")))", "\"\" access_token_key = \"\" access_token_secret = \"\" t = Twitter(auth=OAuth(access_token_key,", "json import time import sys import httplib2 from twitter import", "ext = tw.get_file_extension(raw) with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext), 'wb') as f:", "get_medias(self, nickname): ids = [] for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", self.http_wrapper(self.baseUrl", "_method='GET') self.remaining -= 1 print('{:d}\\t{:d}\\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text'])) for m in", "= \"\" access_token_secret = \"\" t = Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key,", "r['remaining'] / r['limit']), file=sys.stderr) if r['remaining'] / r['limit'] < 0.10:", "get_unix_epoch(created_at): return int(time.mktime(time.strptime(created_at, \"%a %b %d %H:%M:%S +0000 %Y\"))) if", "or self.remaining % 10 is 0 or self.remaining <= 1:", "res = json.loads(res_raw) if not res['has_more_items']: break for tweet in", "%H:%M:%S +0000 %Y\"))) if __name__ == '__main__': for i in", "TwitterMediaDL() for tweetID in tw.get_medias(sys.argv[i]): list_url = tw.get_image_url(tweetID) for j", "Limit : {:d} / {:d} = {:f}\".format(r['remaining'], r['limit'], r['remaining'] /", "= self.http.request( uri=uri, method='GET' ) return content def get_medias(self, nickname):", "'__main__': for i in range(1, len(sys.argv)): tw = TwitterMediaDL() for", "== '__main__': for i in range(1, len(sys.argv)): tw = TwitterMediaDL()", "t = Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret)) remaining = None def", "TwitterMediaDL: http = httplib2.Http(\".cache\") baseUrl = \"https://twitter.com\" consumer_key = \"\"", "-= 1 print('{:d}\\t{:d}\\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text'])) for m in r['entities']['media']: lst.append(m['media_url']", 
"print('{:d}\\t{:d}\\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text'])) for m in r['entities']['media']: lst.append(m['media_url'] + ':orig')", "1] while 1: res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' %", "r['reset'] - time.time() print(\"Please wait... {:f}\".format(reset), file=sys.stderr) time.sleep(reset + 10)", "- time.time() print(\"Please wait... {:f}\".format(reset), file=sys.stderr) time.sleep(reset + 10) @staticmethod", "<= 1: self.check_limit() r = self.t.statuses.show(_id=tweet_id, _method='GET') self.remaining -= 1", "remaining = None def http_wrapper(self, uri): resp, content = self.http.request(", "self.remaining <= 1: self.check_limit() r = self.t.statuses.show(_id=tweet_id, _method='GET') self.remaining -=", "{:f}\".format(r['remaining'], r['limit'], r['remaining'] / r['limit']), file=sys.stderr) if r['remaining'] / r['limit']", "% nickname).decode()): ids.append(int(tweet[1])) max_id = ids[len(ids) - 1] while 1:", "self.remaining % 10 is 0 or self.remaining <= 1: self.check_limit()", "self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() try: res", "= r['reset'] - time.time() print(\"Please wait... {:f}\".format(reset), file=sys.stderr) time.sleep(reset +", "( nickname, max_id)).decode() res = json.loads(res_raw) if not res['has_more_items']: break", "0.10: reset = r['reset'] - time.time() print(\"Please wait... 
{:f}\".format(reset), file=sys.stderr)", "@staticmethod def get_file_extension(binary): mime = magic.from_buffer(binary, True).decode() return mime.split('/')[1] @staticmethod", "self.remaining is None or self.remaining % 10 is 0 or", "res['has_more_items']: break for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", res['items_html']): ids.append(int(tweet[1])) max_id =", "for tweetID in tw.get_medias(sys.argv[i]): list_url = tw.get_image_url(tweetID) for j in", "+ '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() try: res = json.loads(res_raw)", "file=sys.stderr) if r['remaining'] / r['limit'] < 0.10: reset = r['reset']", "httplib2.Http(\".cache\") baseUrl = \"https://twitter.com\" consumer_key = \"\" consumer_secret = \"\"", "range(0, len(list_url)): raw = tw.http_wrapper(list_url[j]) ext = tw.get_file_extension(raw) with open('{:d}_{:d}.{:s}'.format(tweetID,", "self.remaining -= 1 print('{:d}\\t{:d}\\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text'])) for m in r['entities']['media']:", "[] if self.remaining is None or self.remaining % 10 is", "= ids[len(ids) - 1] while 1: res_raw = self.http_wrapper( self.baseUrl", "= tw.http_wrapper(list_url[j]) ext = tw.get_file_extension(raw) with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext), 'wb')", "tw.http_wrapper(list_url[j]) ext = tw.get_file_extension(raw) with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext), 'wb') as", "return content def get_medias(self, nickname): ids = [] for tweet", "* import magic class TwitterMediaDL: http = httplib2.Http(\".cache\") baseUrl =", "\"https://twitter.com\" consumer_key = \"\" consumer_secret = \"\" access_token_key = \"\"", "lst def check_limit(self): r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining = r['remaining'] print(\"API", "int(time.mktime(time.strptime(created_at, \"%a %b %d %H:%M:%S +0000 %Y\"))) if 
__name__ ==", "import magic class TwitterMediaDL: http = httplib2.Http(\".cache\") baseUrl = \"https://twitter.com\"", "Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret)) remaining = None def http_wrapper(self, uri):", "r['remaining'] / r['limit'] < 0.10: reset = r['reset'] - time.time()", "':orig') return lst def check_limit(self): r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining =", "for i in range(1, len(sys.argv)): tw = TwitterMediaDL() for tweetID", "= tw.get_image_url(tweetID) for j in range(0, len(list_url)): raw = tw.http_wrapper(list_url[j])", "json.loads(res_raw) if not res['has_more_items']: break for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", res['items_html']):", ") return content def get_medias(self, nickname): ids = [] for", "break for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", res['items_html']): ids.append(int(tweet[1])) max_id = int(res['max_id'])", "consumer_secret = \"\" access_token_key = \"\" access_token_secret = \"\" t", "for m in r['entities']['media']: lst.append(m['media_url'] + ':orig') return lst def", "res = json.loads(res_raw) except: print(res_raw) time.sleep(5) res_raw = self.http_wrapper( self.baseUrl", "0 or self.remaining <= 1: self.check_limit() r = self.t.statuses.show(_id=tweet_id, _method='GET')", "= TwitterMediaDL() for tweetID in tw.get_medias(sys.argv[i]): list_url = tw.get_image_url(tweetID) for", "def get_image_url(self, tweet_id): lst = [] if self.remaining is None", "while 1: res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % (", "if r['remaining'] / r['limit'] < 0.10: reset = r['reset'] -", "r['limit'] < 0.10: reset = r['reset'] - time.time() print(\"Please wait...", "self.get_unix_epoch(r['created_at']), r['text'])) for m in r['entities']['media']: lst.append(m['media_url'] + ':orig') return", 
"r['text'])) for m in r['entities']['media']: lst.append(m['media_url'] + ':orig') return lst", "r['entities']['media']: lst.append(m['media_url'] + ':orig') return lst def check_limit(self): r =", "= int(res['max_id']) return list(set(ids)) def get_image_url(self, tweet_id): lst = []", "list(set(ids)) def get_image_url(self, tweet_id): lst = [] if self.remaining is", "tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", res['items_html']): ids.append(int(tweet[1])) max_id = int(res['max_id']) return list(set(ids))", "= self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining = r['remaining'] print(\"API Limit : {:d} /", "1 print('{:d}\\t{:d}\\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text'])) for m in r['entities']['media']: lst.append(m['media_url'] +", "% 10 is 0 or self.remaining <= 1: self.check_limit() r", "tw = TwitterMediaDL() for tweetID in tw.get_medias(sys.argv[i]): list_url = tw.get_image_url(tweetID)", "raw = tw.http_wrapper(list_url[j]) ext = tw.get_file_extension(raw) with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext),", "self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() res = json.loads(res_raw)", "self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() try: res =", "= r['remaining'] print(\"API Limit : {:d} / {:d} = {:f}\".format(r['remaining'],", "max_id = ids[len(ids) - 1] while 1: res_raw = self.http_wrapper(", "import re import json import time import sys import httplib2", "return list(set(ids)) def get_image_url(self, tweet_id): lst = [] if self.remaining", "= magic.from_buffer(binary, True).decode() return mime.split('/')[1] @staticmethod def get_unix_epoch(created_at): return int(time.mktime(time.strptime(created_at,", "max_id)).decode() try: res = json.loads(res_raw) except: 
print(res_raw) time.sleep(5) res_raw =", "{:d} = {:f}\".format(r['remaining'], r['limit'], r['remaining'] / r['limit']), file=sys.stderr) if r['remaining']", "re.findall(\"twitter.com/(.+)/status/([0-9]+)\", self.http_wrapper(self.baseUrl + '/%s/media' % nickname).decode()): ids.append(int(tweet[1])) max_id = ids[len(ids)", "i in range(1, len(sys.argv)): tw = TwitterMediaDL() for tweetID in", "access_token_secret, consumer_key, consumer_secret)) remaining = None def http_wrapper(self, uri): resp,", "= self.t.statuses.show(_id=tweet_id, _method='GET') self.remaining -= 1 print('{:d}\\t{:d}\\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text'])) for", "10) @staticmethod def get_file_extension(binary): mime = magic.from_buffer(binary, True).decode() return mime.split('/')[1]", "= Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret)) remaining = None def http_wrapper(self,", "( nickname, max_id)).decode() try: res = json.loads(res_raw) except: print(res_raw) time.sleep(5)", "if self.remaining is None or self.remaining % 10 is 0", "def get_file_extension(binary): mime = magic.from_buffer(binary, True).decode() return mime.split('/')[1] @staticmethod def", "return int(time.mktime(time.strptime(created_at, \"%a %b %d %H:%M:%S +0000 %Y\"))) if __name__", "nickname, max_id)).decode() res = json.loads(res_raw) if not res['has_more_items']: break for", "tw.get_medias(sys.argv[i]): list_url = tw.get_image_url(tweetID) for j in range(0, len(list_url)): raw", "self.t.statuses.show(_id=tweet_id, _method='GET') self.remaining -= 1 print('{:d}\\t{:d}\\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text'])) for m", "list_url = tw.get_image_url(tweetID) for j in range(0, len(list_url)): raw =", "%d %H:%M:%S +0000 %Y\"))) if __name__ == '__main__': for i", "self.http_wrapper(self.baseUrl + '/%s/media' % nickname).decode()): ids.append(int(tweet[1])) max_id = ids[len(ids) -", "+ 10) @staticmethod def 
get_file_extension(binary): mime = magic.from_buffer(binary, True).decode() return", "not res['has_more_items']: break for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", res['items_html']): ids.append(int(tweet[1])) max_id", "< 0.10: reset = r['reset'] - time.time() print(\"Please wait... {:f}\".format(reset),", "tw.get_image_url(tweetID) for j in range(0, len(list_url)): raw = tw.http_wrapper(list_url[j]) ext", "r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining = r['remaining'] print(\"API Limit : {:d}", "= \"\" access_token_key = \"\" access_token_secret = \"\" t =", "[] for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", self.http_wrapper(self.baseUrl + '/%s/media' % nickname).decode()):", "10 is 0 or self.remaining <= 1: self.check_limit() r =", "m in r['entities']['media']: lst.append(m['media_url'] + ':orig') return lst def check_limit(self):", "= {:f}\".format(r['remaining'], r['limit'], r['remaining'] / r['limit']), file=sys.stderr) if r['remaining'] /", "if not res['has_more_items']: break for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", res['items_html']): ids.append(int(tweet[1]))", "/ r['limit']), file=sys.stderr) if r['remaining'] / r['limit'] < 0.10: reset", "\"%a %b %d %H:%M:%S +0000 %Y\"))) if __name__ == '__main__':", "r = self.t.statuses.show(_id=tweet_id, _method='GET') self.remaining -= 1 print('{:d}\\t{:d}\\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text']))", "for j in range(0, len(list_url)): raw = tw.http_wrapper(list_url[j]) ext =", "res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode()", "%Y\"))) if __name__ == '__main__': for i in range(1, len(sys.argv)):", "+ '/%s/media' % nickname).decode()): ids.append(int(tweet[1])) max_id = ids[len(ids) - 1]", "return lst def check_limit(self): r = 
self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining = r['remaining']", "j in range(0, len(list_url)): raw = tw.http_wrapper(list_url[j]) ext = tw.get_file_extension(raw)", "from twitter import * import magic class TwitterMediaDL: http =", "% ( nickname, max_id)).decode() res = json.loads(res_raw) if not res['has_more_items']:", "range(1, len(sys.argv)): tw = TwitterMediaDL() for tweetID in tw.get_medias(sys.argv[i]): list_url", "import httplib2 from twitter import * import magic class TwitterMediaDL:", "r['limit'], r['remaining'] / r['limit']), file=sys.stderr) if r['remaining'] / r['limit'] <", "except: print(res_raw) time.sleep(5) res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' %", "% ( nickname, max_id)).decode() try: res = json.loads(res_raw) except: print(res_raw)", "= json.loads(res_raw) if not res['has_more_items']: break for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\",", "print(\"Please wait... 
{:f}\".format(reset), file=sys.stderr) time.sleep(reset + 10) @staticmethod def get_file_extension(binary):", "uri): resp, content = self.http.request( uri=uri, method='GET' ) return content", "in tw.get_medias(sys.argv[i]): list_url = tw.get_image_url(tweetID) for j in range(0, len(list_url)):", "ids.append(int(tweet[1])) max_id = ids[len(ids) - 1] while 1: res_raw =", "self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() res =", "lst = [] if self.remaining is None or self.remaining %", "re import json import time import sys import httplib2 from", ": {:d} / {:d} = {:f}\".format(r['remaining'], r['limit'], r['remaining'] / r['limit']),", "{:f}\".format(reset), file=sys.stderr) time.sleep(reset + 10) @staticmethod def get_file_extension(binary): mime =", "self.check_limit() r = self.t.statuses.show(_id=tweet_id, _method='GET') self.remaining -= 1 print('{:d}\\t{:d}\\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']),", "<reponame>mikoim/funstuff import re import json import time import sys import", "\"\" consumer_secret = \"\" access_token_key = \"\" access_token_secret = \"\"", "try: res = json.loads(res_raw) except: print(res_raw) time.sleep(5) res_raw = self.http_wrapper(", "in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", self.http_wrapper(self.baseUrl + '/%s/media' % nickname).decode()): ids.append(int(tweet[1])) max_id =", "in r['entities']['media']: lst.append(m['media_url'] + ':orig') return lst def check_limit(self): r", "= None def http_wrapper(self, uri): resp, content = self.http.request( uri=uri,", "content def get_medias(self, nickname): ids = [] for tweet in", "resp, content = self.http.request( uri=uri, method='GET' ) return content def", "consumer_key = \"\" consumer_secret = \"\" access_token_key = \"\" access_token_secret", "is None or self.remaining % 10 is 0 or self.remaining", "- 1] while 1: res_raw = self.http_wrapper( 
self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d'", "in range(1, len(sys.argv)): tw = TwitterMediaDL() for tweetID in tw.get_medias(sys.argv[i]):", "res['items_html']): ids.append(int(tweet[1])) max_id = int(res['max_id']) return list(set(ids)) def get_image_url(self, tweet_id):", "ids[len(ids) - 1] while 1: res_raw = self.http_wrapper( self.baseUrl +", "1: res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname,", "sys import httplib2 from twitter import * import magic class", "'/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() try: res = json.loads(res_raw) except:", "'/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() res = json.loads(res_raw) if not", "True).decode() return mime.split('/')[1] @staticmethod def get_unix_epoch(created_at): return int(time.mktime(time.strptime(created_at, \"%a %b", "self.remaining = r['remaining'] print(\"API Limit : {:d} / {:d} =", "len(list_url)): raw = tw.http_wrapper(list_url[j]) ext = tw.get_file_extension(raw) with open('{:d}_{:d}.{:s}'.format(tweetID, j,", "= \"\" consumer_secret = \"\" access_token_key = \"\" access_token_secret =", "def http_wrapper(self, uri): resp, content = self.http.request( uri=uri, method='GET' )", "magic.from_buffer(binary, True).decode() return mime.split('/')[1] @staticmethod def get_unix_epoch(created_at): return int(time.mktime(time.strptime(created_at, \"%a", "= self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() try:", "\"\" access_token_secret = \"\" t = Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret))", "mime = magic.from_buffer(binary, 
True).decode() return mime.split('/')[1] @staticmethod def get_unix_epoch(created_at): return", "nickname, max_id)).decode() try: res = json.loads(res_raw) except: print(res_raw) time.sleep(5) res_raw", "{:d} / {:d} = {:f}\".format(r['remaining'], r['limit'], r['remaining'] / r['limit']), file=sys.stderr)", "1: self.check_limit() r = self.t.statuses.show(_id=tweet_id, _method='GET') self.remaining -= 1 print('{:d}\\t{:d}\\t{:s}'.format(tweet_id,", "consumer_key, consumer_secret)) remaining = None def http_wrapper(self, uri): resp, content", "= httplib2.Http(\".cache\") baseUrl = \"https://twitter.com\" consumer_key = \"\" consumer_secret =", "ids = [] for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", self.http_wrapper(self.baseUrl + '/%s/media'", "ids.append(int(tweet[1])) max_id = int(res['max_id']) return list(set(ids)) def get_image_url(self, tweet_id): lst", "'/%s/media' % nickname).decode()): ids.append(int(tweet[1])) max_id = ids[len(ids) - 1] while", "for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", res['items_html']): ids.append(int(tweet[1])) max_id = int(res['max_id']) return", "content = self.http.request( uri=uri, method='GET' ) return content def get_medias(self,", "/ {:d} = {:f}\".format(r['remaining'], r['limit'], r['remaining'] / r['limit']), file=sys.stderr) if", "def get_medias(self, nickname): ids = [] for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\",", "= [] for tweet in re.findall(\"twitter.com/(.+)/status/([0-9]+)\", self.http_wrapper(self.baseUrl + '/%s/media' %", "None or self.remaining % 10 is 0 or self.remaining <=", "get_image_url(self, tweet_id): lst = [] if self.remaining is None or", "http = httplib2.Http(\".cache\") baseUrl = \"https://twitter.com\" consumer_key = \"\" consumer_secret", "json.loads(res_raw) except: print(res_raw) time.sleep(5) res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d'", "= 
\"https://twitter.com\" consumer_key = \"\" consumer_secret = \"\" access_token_key =", "re.findall(\"twitter.com/(.+)/status/([0-9]+)\", res['items_html']): ids.append(int(tweet[1])) max_id = int(res['max_id']) return list(set(ids)) def get_image_url(self,", "lst.append(m['media_url'] + ':orig') return lst def check_limit(self): r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id']", "time.sleep(reset + 10) @staticmethod def get_file_extension(binary): mime = magic.from_buffer(binary, True).decode()", "http_wrapper(self, uri): resp, content = self.http.request( uri=uri, method='GET' ) return", "baseUrl = \"https://twitter.com\" consumer_key = \"\" consumer_secret = \"\" access_token_key", "%b %d %H:%M:%S +0000 %Y\"))) if __name__ == '__main__': for", "max_id)).decode() res = json.loads(res_raw) if not res['has_more_items']: break for tweet", "+0000 %Y\"))) if __name__ == '__main__': for i in range(1,", "if __name__ == '__main__': for i in range(1, len(sys.argv)): tw", "\"\" t = Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret)) remaining = None", "int(res['max_id']) return list(set(ids)) def get_image_url(self, tweet_id): lst = [] if", "max_id = int(res['max_id']) return list(set(ids)) def get_image_url(self, tweet_id): lst =", "in range(0, len(list_url)): raw = tw.http_wrapper(list_url[j]) ext = tw.get_file_extension(raw) with", "mime.split('/')[1] @staticmethod def get_unix_epoch(created_at): return int(time.mktime(time.strptime(created_at, \"%a %b %d %H:%M:%S", "tweetID in tw.get_medias(sys.argv[i]): list_url = tw.get_image_url(tweetID) for j in range(0,", "uri=uri, method='GET' ) return content def get_medias(self, nickname): ids =", "import time import sys import httplib2 from twitter import *", "import * import magic class TwitterMediaDL: http = httplib2.Http(\".cache\") baseUrl", "twitter import * import magic class TwitterMediaDL: http = httplib2.Http(\".cache\")", 
"file=sys.stderr) time.sleep(reset + 10) @staticmethod def get_file_extension(binary): mime = magic.from_buffer(binary,", "= tw.get_file_extension(raw) with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext), 'wb') as f: f.write(raw)", "check_limit(self): r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining = r['remaining'] print(\"API Limit :", "import sys import httplib2 from twitter import * import magic", "method='GET' ) return content def get_medias(self, nickname): ids = []", "self.http.request( uri=uri, method='GET' ) return content def get_medias(self, nickname): ids", "@staticmethod def get_unix_epoch(created_at): return int(time.mktime(time.strptime(created_at, \"%a %b %d %H:%M:%S +0000", "len(sys.argv)): tw = TwitterMediaDL() for tweetID in tw.get_medias(sys.argv[i]): list_url =", "print(\"API Limit : {:d} / {:d} = {:f}\".format(r['remaining'], r['limit'], r['remaining']", "reset = r['reset'] - time.time() print(\"Please wait... {:f}\".format(reset), file=sys.stderr) time.sleep(reset", "r['remaining'] print(\"API Limit : {:d} / {:d} = {:f}\".format(r['remaining'], r['limit'],", "tweet_id): lst = [] if self.remaining is None or self.remaining" ]
[ "2.0 (the \"License\"); # you may not use this file", "@@streaming_accuracy @@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error", "streaming_sensitivity_at_specificity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k from", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives from tensorflow.contrib.metrics.python.ops.metric_ops", "import streaming_pearson_correlation from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance from tensorflow.contrib.metrics.python.ops.metric_ops import", "@@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import", "streaming_true_positives from tensorflow.contrib.metrics.python.ops.metric_ops import 
streaming_true_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.set_ops import set_difference from", "summary statistics. See the @{$python/contrib.metrics} guide. @@streaming_accuracy @@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds", "streaming_root_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k from", "import streaming_covariance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives", "streaming_auc from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance from", "import streaming_concat from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points", "metrics and summary statistics. See the @{$python/contrib.metrics} guide. 
@@streaming_accuracy @@streaming_mean", "@@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou @@streaming_mean_relative_error", "streaming_precision from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall from", "@@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k", "streaming_specificity_at_sensitivity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds from", "from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection from tensorflow.contrib.metrics.python.ops.set_ops import set_size from tensorflow.contrib.metrics.python.ops.set_ops", "import set_intersection from tensorflow.contrib.metrics.python.ops.set_ops import set_size from tensorflow.contrib.metrics.python.ops.set_ops import set_union", "use this file except in compliance with the License. #", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives from tensorflow.contrib.metrics.python.ops.metric_ops import", "under the License. 
# ============================================================================== \"\"\"Ops for evaluation metrics and", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity from tensorflow.contrib.metrics.python.ops.metric_ops import", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives from tensorflow.contrib.metrics.python.ops.metric_ops", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "License. # You may obtain a copy of the License", "streaming_false_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean from", "The TensorFlow Authors. All Rights Reserved. 
# # Licensed under", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import", "under the License is distributed on an \"AS IS\" BASIS,", "@@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity", "License for the specific language governing permissions and # limitations", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import", "import streaming_precision from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall", "import streaming_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error", "Reserved. # # Licensed under the Apache License, Version 2.0", "set_size from tensorflow.contrib.metrics.python.ops.set_ops import set_union # pylint: enable=unused-import,line-too-long from tensorflow.python.util.all_util", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops", "governing permissions and # limitations under the License. 
# ==============================================================================", "Copyright 2016 The TensorFlow Authors. All Rights Reserved. # #", "TensorFlow Authors. All Rights Reserved. # # Licensed under the", "============================================================================== \"\"\"Ops for evaluation metrics and summary statistics. See the", "# ============================================================================== \"\"\"Ops for evaluation metrics and summary statistics. See", "from __future__ import absolute_import from __future__ import division from __future__", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "import streaming_true_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives", "set_intersection from tensorflow.contrib.metrics.python.ops.set_ops import set_size from tensorflow.contrib.metrics.python.ops.set_ops import set_union #", "import streaming_false_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops", "import streaming_mean_tensor from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision from 
tensorflow.contrib.metrics.python.ops.metric_ops import", "streaming_false_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds from", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.set_ops import set_difference from tensorflow.contrib.metrics.python.ops.set_ops import", "import streaming_sparse_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives", "the @{$python/contrib.metrics} guide. @@streaming_accuracy @@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import", "import confusion_matrix from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "import streaming_curve_points from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "import streaming_specificity_at_sensitivity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat from tensorflow.contrib.metrics.python.ops.metric_ops import", "aggregate_metric_map from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy from", "to in writing, software # distributed under the License is", "# See the License for the specific language governing permissions", "disable=unused-import,line-too-long,g-importing-member,wildcard-import from tensorflow.contrib.metrics.python.metrics import * # pylint: enable=wildcard-import from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops", "import streaming_mean from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance", "import streaming_sparse_average_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k", "language governing permissions and # limitations under the License. 
#", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less from tensorflow.contrib.metrics.python.ops.metric_ops import", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. # You may obtain a copy of", "tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map from tensorflow.contrib.metrics.python.ops.metric_ops import", "the License. # ============================================================================== \"\"\"Ops for evaluation metrics and summary", "import streaming_root_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k", "from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map from tensorflow.contrib.metrics.python.ops.metric_ops", "@@set_intersection @@set_size @@set_union \"\"\" from __future__ import absolute_import from __future__", "compliance with the License. # You may obtain a copy", "All Rights Reserved. 
# # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "distributed under the License is distributed on an \"AS IS\"", "@@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou @@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error", "from __future__ import print_function # pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import from tensorflow.contrib.metrics.python.metrics import", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops", "@@auc_using_histogram @@accuracy @@aggregate_metrics @@aggregate_metric_map @@confusion_matrix @@set_difference @@set_intersection @@set_size @@set_union \"\"\"", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance from tensorflow.contrib.metrics.python.ops.metric_ops", "express or implied. # See the License for the specific", "except in compliance with the License. 
# You may obtain", "@@streaming_auc @@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou @@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance", "@@set_size @@set_union \"\"\" from __future__ import absolute_import from __future__ import", "@@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points from tensorflow.contrib.metrics.python.ops.metric_ops import", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou from tensorflow.contrib.metrics.python.ops.metric_ops", "not use this file except in compliance with the License.", "streaming_sparse_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives from", "\"\"\"Ops for evaluation metrics and summary statistics. 
See the @{$python/contrib.metrics}", "writing, software # distributed under the License is distributed on", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "@@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy @@aggregate_metrics", "print_function # pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import from tensorflow.contrib.metrics.python.metrics import * # pylint:", "import streaming_true_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.set_ops import set_difference", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor from tensorflow.contrib.metrics.python.ops.metric_ops import", "streaming_precision_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k from", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "from tensorflow.contrib.metrics.python.ops.set_ops import set_difference from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection from tensorflow.contrib.metrics.python.ops.set_ops", "@@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy @@aggregate_metrics @@aggregate_metric_map @@confusion_matrix @@set_difference @@set_intersection @@set_size @@set_union", "streaming_mean_absolute_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou from", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "@@streaming_mean_cosine_distance @@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat", "import aggregate_metrics from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc", "import streaming_recall from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds", "@@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy", "@@accuracy @@aggregate_metrics @@aggregate_metric_map @@confusion_matrix @@set_difference @@set_intersection @@set_size @@set_union \"\"\" from", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives from 
tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops", "import set_difference from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection from tensorflow.contrib.metrics.python.ops.set_ops import set_size", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity from tensorflow.contrib.metrics.python.ops.metric_ops", "import streaming_sensitivity_at_specificity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k", "@@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives", "@@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou @@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics from tensorflow.contrib.metrics.python.ops.metric_ops", "import aggregate_metric_map from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy", "the License is distributed on an \"AS IS\" BASIS, #", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision from tensorflow.contrib.metrics.python.ops.metric_ops", "streaming_sparse_average_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k from", "@@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives", "streaming_pearson_correlation from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision from", "streaming_mean_relative_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor from", "@@streaming_mean_iou @@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less 
@@streaming_sensitivity_at_specificity", "import streaming_false_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives", "tensorflow.contrib.metrics.python.ops.set_ops import set_union # pylint: enable=unused-import,line-too-long from tensorflow.python.util.all_util import remove_undocumented", "@@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy @@aggregate_metrics @@aggregate_metric_map @@confusion_matrix @@set_difference @@set_intersection", "law or agreed to in writing, software # distributed under", "@@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean from tensorflow.contrib.metrics.python.ops.metric_ops import", "\"\"\" from __future__ import absolute_import from __future__ import division from", "import streaming_sparse_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives from tensorflow.contrib.metrics.python.ops.metric_ops import", "@{$python/contrib.metrics} guide. 
@@streaming_accuracy @@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops", "import streaming_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation", "tensorflow.contrib.metrics.python.ops.set_ops import set_intersection from tensorflow.contrib.metrics.python.ops.set_ops import set_size from tensorflow.contrib.metrics.python.ops.set_ops import", "streaming_curve_points from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds from", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less from tensorflow.contrib.metrics.python.ops.metric_ops", "limitations under the License. 
# ============================================================================== \"\"\"Ops for evaluation metrics", "may obtain a copy of the License at # #", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error from tensorflow.contrib.metrics.python.ops.metric_ops import", "import streaming_mean_cosine_distance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error", "streaming_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error from", "streaming_sparse_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k from", "import streaming_mean_relative_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "@@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy @@aggregate_metrics @@aggregate_metric_map @@confusion_matrix @@set_difference", "for evaluation metrics and summary statistics. 
See the @{$python/contrib.metrics} guide.", "@@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k", "may not use this file except in compliance with the", "import streaming_sparse_average_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "import streaming_precision_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k", "this file except in compliance with the License. # You", "@@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou @@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation", "import absolute_import from __future__ import division from __future__ import print_function", "pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import from tensorflow.contrib.metrics.python.metrics import * # pylint: enable=wildcard-import from", "streaming_mean_iou from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error from", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "from __future__ import division from __future__ import print_function # pylint:", "division from __future__ import print_function # pylint: 
disable=unused-import,line-too-long,g-importing-member,wildcard-import from tensorflow.contrib.metrics.python.metrics", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import", "# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import from tensorflow.contrib.metrics.python.metrics import * # pylint: enable=wildcard-import", "@@set_difference @@set_intersection @@set_size @@set_union \"\"\" from __future__ import absolute_import from", "streaming_false_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error from", "pylint: enable=wildcard-import from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram", "@@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k", "@@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", 
"tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc from tensorflow.contrib.metrics.python.ops.metric_ops import", "streaming_sparse_average_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k from", "streaming_recall from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds from", "tensorflow.contrib.metrics.python.ops.set_ops import set_difference from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection from tensorflow.contrib.metrics.python.ops.set_ops import", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error from tensorflow.contrib.metrics.python.ops.metric_ops", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc from tensorflow.contrib.metrics.python.ops.metric_ops", "tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram from tensorflow.contrib.metrics.python.ops.metric_ops import", "streaming_covariance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives from", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance from tensorflow.contrib.metrics.python.ops.metric_ops", 
"tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity from tensorflow.contrib.metrics.python.ops.metric_ops import", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "set_difference from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection from tensorflow.contrib.metrics.python.ops.set_ops import set_size from", "@@confusion_matrix @@set_difference @@set_intersection @@set_size @@set_union \"\"\" from __future__ import absolute_import", "# pylint: enable=wildcard-import from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix from tensorflow.contrib.metrics.python.ops.histogram_ops import", "aggregate_metrics from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc from", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives from tensorflow.contrib.metrics.python.ops.metric_ops", "or implied. # See the License for the specific language", "Rights Reserved. # # Licensed under the Apache License, Version", "2016 The TensorFlow Authors. All Rights Reserved. # # Licensed", "import auc_using_histogram from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "auc_using_histogram from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics from", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops", "@@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy @@aggregate_metrics @@aggregate_metric_map @@confusion_matrix", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall from tensorflow.contrib.metrics.python.ops.metric_ops", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "@@set_union \"\"\" from __future__ import absolute_import from __future__ import division", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops", "streaming_true_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.set_ops import set_difference from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection from", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops", "import * # pylint: enable=wildcard-import from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import 
confusion_matrix from", "import streaming_false_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean", "permissions and # limitations under the License. # ============================================================================== \"\"\"Ops", "tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics from tensorflow.contrib.metrics.python.ops.metric_ops import", "(the \"License\"); # you may not use this file except", "# you may not use this file except in compliance", "streaming_mean_cosine_distance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error from", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import", "* # pylint: enable=wildcard-import from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix from tensorflow.contrib.metrics.python.ops.histogram_ops", "from tensorflow.contrib.metrics.python.ops.set_ops import set_union # pylint: enable=unused-import,line-too-long from tensorflow.python.util.all_util import", "@@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou @@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor", "# # Unless required by applicable law or agreed to", "import streaming_mean_absolute_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance from 
tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean from tensorflow.contrib.metrics.python.ops.metric_ops", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops", "License. # ============================================================================== \"\"\"Ops for evaluation metrics and summary statistics.", "import streaming_auc from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "Version 2.0 (the \"License\"); # you may not use this", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou from tensorflow.contrib.metrics.python.ops.metric_ops import", "streaming_true_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives from", "tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy from tensorflow.contrib.metrics.python.ops.metric_ops import", "streaming_false_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives from", "from tensorflow.contrib.metrics.python.ops.metric_ops import 
streaming_sparse_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops", "__future__ import absolute_import from __future__ import division from __future__ import", "implied. # See the License for the specific language governing", "statistics. See the @{$python/contrib.metrics} guide. @@streaming_accuracy @@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision", "under the Apache License, Version 2.0 (the \"License\"); # you", "@@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k", "__future__ import division from __future__ import print_function # pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import", "absolute_import from __future__ import division from __future__ import print_function #", "enable=wildcard-import from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram from", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.set_ops import set_difference from tensorflow.contrib.metrics.python.ops.set_ops", "by applicable law or agreed to in writing, software #", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error from tensorflow.contrib.metrics.python.ops.metric_ops", "and summary statistics. See the @{$python/contrib.metrics} guide. 
@@streaming_accuracy @@streaming_mean @@streaming_recall", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points from tensorflow.contrib.metrics.python.ops.metric_ops", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.set_ops import", "streaming_sparse_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity from", "streaming_accuracy from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat from", "import streaming_percentage_less from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds", "and # limitations under the License. 
# ============================================================================== \"\"\"Ops for", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import", "@@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou @@streaming_mean_relative_error @@streaming_mean_squared_error", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops", "# limitations under the License. # ============================================================================== \"\"\"Ops for evaluation", "import division from __future__ import print_function # pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import from", "import streaming_recall_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Unless required by applicable law or agreed to in writing,", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.set_ops", "tensorflow.contrib.metrics.python.metrics import * # pylint: enable=wildcard-import from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix", "the specific language governing permissions and # limitations under 
the", "streaming_recall_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity from", "applicable law or agreed to in writing, software # distributed", "@@aggregate_metric_map @@confusion_matrix @@set_difference @@set_intersection @@set_size @@set_union \"\"\" from __future__ import", "guide. @@streaming_accuracy @@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points @@streaming_recall_at_k", "from tensorflow.contrib.metrics.python.ops.set_ops import set_size from tensorflow.contrib.metrics.python.ops.set_ops import set_union # pylint:", "streaming_true_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds from", "in writing, software # distributed under the License is distributed", "import streaming_mean_iou from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error", "streaming_percentage_less from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds from", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import", "streaming_mean_tensor from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less from", 
"@@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy @@aggregate_metrics @@aggregate_metric_map @@confusion_matrix @@set_difference @@set_intersection @@set_size", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "@@aggregate_metrics @@aggregate_metric_map @@confusion_matrix @@set_difference @@set_intersection @@set_size @@set_union \"\"\" from __future__", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "@@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k", "import streaming_false_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error", "Authors. All Rights Reserved. 
# # Licensed under the Apache", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import", "@@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives from tensorflow.contrib.metrics.python.ops.metric_ops", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import", "evaluation metrics and summary statistics. See the @{$python/contrib.metrics} guide. @@streaming_accuracy", "the License for the specific language governing permissions and #", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. 
# See the License for the", "from tensorflow.contrib.metrics.python.metrics import * # pylint: enable=wildcard-import from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall from tensorflow.contrib.metrics.python.ops.metric_ops import", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "@@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou", "from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy from tensorflow.contrib.metrics.python.ops.metric_ops", "import streaming_true_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.set_ops import set_difference from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat from tensorflow.contrib.metrics.python.ops.metric_ops", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean from tensorflow.contrib.metrics.python.ops.metric_ops 
import streaming_mean_absolute_error from tensorflow.contrib.metrics.python.ops.metric_ops import", "See the @{$python/contrib.metrics} guide. @@streaming_accuracy @@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds", "import set_size from tensorflow.contrib.metrics.python.ops.set_ops import set_union # pylint: enable=unused-import,line-too-long from", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "import set_union # pylint: enable=unused-import,line-too-long from tensorflow.python.util.all_util import remove_undocumented remove_undocumented(__name__)", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved. #", "__future__ import print_function # pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import from tensorflow.contrib.metrics.python.metrics import *", "@@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy @@aggregate_metrics @@aggregate_metric_map", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation from tensorflow.contrib.metrics.python.ops.metric_ops import", "confusion_matrix from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map from", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import", "@@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k 
@@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds", "\"License\"); # you may not use this file except in", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity from tensorflow.contrib.metrics.python.ops.metric_ops", "import print_function # pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import from tensorflow.contrib.metrics.python.metrics import * #", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram from tensorflow.contrib.metrics.python.ops.metric_ops", "import streaming_accuracy from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat", "@@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "@@streaming_mean_absolute_error @@streaming_mean_iou 
@@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less", "tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance from tensorflow.contrib.metrics.python.ops.metric_ops import", "tensorflow.contrib.metrics.python.ops.set_ops import set_size from tensorflow.contrib.metrics.python.ops.set_ops import set_union # pylint: enable=unused-import,line-too-long", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor from tensorflow.contrib.metrics.python.ops.metric_ops", "import streaming_true_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds", "streaming_mean from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance from", "You may obtain a copy of the License at #", "streaming_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation from", "import streaming_sparse_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity", "streaming_concat from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance from tensorflow.contrib.metrics.python.ops.metric_ops import 
streaming_curve_points from", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation from tensorflow.contrib.metrics.python.ops.metric_ops", "the Apache License, Version 2.0 (the \"License\"); # you may", "from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops" ]
[ "be used for other purposes that require groupings of users", "querying membership, which involves checking access control policies, which is", "a specific user. The task of querying all members within", "in the database. :param group: The group document to delete.", "the list of all users who belong to this group.", "= { '$pull': { 'access.groups': {'id': group['_id']} } } #", "and also grants the specified access level on the group", "least read access on the group. \"\"\" if not 'groups'", "group. \"\"\" def initialize(self): self.name = 'group' self.ensureIndices(['lowerName']) self.ensureTextIndex({ 'name':", "to list members on. :param offset: Offset into the result", "group is much less common and typically only performed ona", "def getMembers(self, group, offset=0, limit=50, sort=None): \"\"\" Return the list", "user['groups'] = [] if not group['_id'] in user['groups']: user['groups'].append(group['_id']) self.model('user').save(user,", "Whether the group is publicly visible. :type public: bool :param", "name already' 'exists.', 'name') return doc def list(self, user=None, limit=50,", "'groupInvites' in user: user['groupInvites'] = [] for invite in user['groupInvites']:", "they accept the invitation, they will be given the specified", "validate=False) break else: raise AccessException('User was not invited to this", "in doc: q['_id'] = {'$ne': doc['_id']} duplicates = self.find(q, limit=1,", "and its members. Users with WRITE access on the group", "self.model('user').update({ 'groups': group['_id'] }, { '$pull': {'groups': group['_id']} }) acQuery", "self.setPublic(group, public=public) # Now validate and save the group self.save(group)", "join the group. Inviting them automatically grants the user read", "user accepts an invitation. 
\"\"\" if not 'groupInvites' in user:", "group and also grant them # admin access over the", "time, so doing a find on the indexed group list", "self.model('user').update(acQuery, acUpdate) # Finally, delete the document itself AccessControlledModel.remove(self, group)", "{ 'access.groups': {'id': group['_id']} } } # Remove references to", "of access. \"\"\" # User has to be able to", "def list(self, user=None, limit=50, offset=0, sort=None): \"\"\" Search for groups", "this to perform a text search of all groups. :param", "if not 'groups' in user: user['groups'] = [] if not", "the invitation, they will be given the specified level of", "add and remove members and change the name or description.", "on the group itself to the user. Any group member", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "\"\"\" Remove the user from the group. \"\"\" # Remove", "\"\"\" if not 'groupInvites' in user: user['groupInvites'] = [] for", "acUpdate = { '$pull': { 'access.groups': {'id': group['_id']} } }", "'$pull': {'groups': group['_id']} }) acQuery = { 'access.groups.id': group['_id'] }", "import datetime from .model_base import AccessControlledModel,\\ ValidationException,\\ AccessException from girder.constants", "language governing permissions and # limitations under the License. ###############################################################################", "specified access level on the group itself to the user.", "'groups' in user: user['groups'] = [] if not group['_id'] in", "= [] for user in cursor: users.append(user) return users def", "access on the group. \"\"\" if not 'groups' in user:", "itself to the user. 
Any group member has at least", "access to the group so that they can see it.", "from girder.constants import AccessType class Group(AccessControlledModel): \"\"\" Groups are simply", "the document itself AccessControlledModel.remove(self, group) def getMembers(self, group, offset=0, limit=50,", "is much less common and typically only performed ona single", "see it. Once they accept the invitation, they will be", "the group to join it self.setUserAccess(group, user, AccessType.READ, save=True) if", "\"\"\" Groups are simply groups of users. The primary use", "list all visible groups. :param text: Pass this to perform", "is publicly visible. :type public: bool :param creator: User document", "visible. :type public: bool :param creator: User document representing the", "# Remove all group access for this user on this", "cursor: users.append(user) return users def addUser(self, group, user, level=AccessType.READ): \"\"\"", "representing the creator of the group. :type creator: dict :returns:", "users is to simplify access control for resources in the", "access can delete the entire group. \"\"\" def initialize(self): self.name", "group.') return group def inviteUser(self, group, user, level=AccessType.READ): \"\"\" Invite", "use this file except in compliance with the License. #", "user document, and also grants the specified access level on", "user=None, limit=50, offset=0, sort=None): \"\"\" Search for groups or simply", "set of users. :param limit: Result set size limit. :param", "which involves checking access control policies, which is always done", "groups. :param user: The user to search as. :param limit:", "group, user): \"\"\" Call this when the user accepts an", "be able to see the group to join it self.setUserAccess(group,", "and change the name or description. Users with ADMIN access", "model. 
This is to optimize for the most common use", "text: Pass this to perform a text search of all", "}, { '$pull': {'groups': group['_id']} }) acQuery = { 'access.groups.id':", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "Delete a group, and all references to it in the", "group['_id']: self.addUser(group, user, level=invite['level']) user['groupInvites'].remove(invite) self.model('user').save(user, validate=False) break else: raise", "done relative to a specific user. The task of querying", "removeUser(self, group, user): \"\"\" Remove the user from the group.", "will be given admin access to it. :param name: The", "License. # You may obtain a copy of the License", "references to this group from user group membership lists self.model('user').update({", "group, user, level=AccessType.READ): \"\"\" Add the user to the group.", "0: raise ValidationException('A group with that name already' 'exists.', 'name')", "under the License is distributed on an \"AS IS\" BASIS,", "acQuery = { 'access.groups.id': group['_id'] } acUpdate = { '$pull':", "License for the specific language governing permissions and # limitations", "License. ############################################################################### import datetime from .model_base import AccessControlledModel,\\ ValidationException,\\ AccessException", "see the group and its members. Users with WRITE access", "group = { 'name': name, 'description': description, 'created': now, 'updated':", "access-controlled collections. self.update(acQuery, acUpdate) self.model('collection').update(acQuery, acUpdate) self.model('folder').update(acQuery, acUpdate) self.model('user').update(acQuery, acUpdate)", "Copyright 2013 Kitware Inc. # # Licensed under the Apache", "{ 'lowerName': doc['lowerName'], } if '_id' in doc: q['_id'] =", "creator: dict :returns: The group document that was created. 
\"\"\"", "AccessType.READ, save=True) if group['_id'] in user.get('groups', []): raise ValidationException('User is", "group. Records membership in the group in the user document,", "the specified access level on the group itself to the", "if invite['groupId'] == group['_id']: invite['level'] = level break else: user['groupInvites'].append({", "use of grouping users is to simplify access control for", "references to it in the database. :param group: The group", "on the user document only; there is no \"users\" field", "The sort direction. \"\"\" # Perform the find; we'll do", "invitation. \"\"\" if not 'groupInvites' in user: user['groupInvites'] = []", "doc): doc['name'] = doc['name'].strip() doc['lowerName'] = doc['name'].lower() doc['description'] = doc['description'].strip()", "it. Once they accept the invitation, they will be given", "a time, so doing a find on the indexed group", "}) def validate(self, doc): doc['name'] = doc['name'].strip() doc['lowerName'] = doc['name'].lower()", "sort=None): \"\"\" Search for groups or simply list all visible", "save=True) if group['_id'] in user.get('groups', []): raise ValidationException('User is already", "only; there is no \"users\" field in this model. This", "creator: User document representing the creator of the group. :type", "{ 'access.groups.id': group['_id'] } acUpdate = { '$pull': { 'access.groups':", "sort=sort) users = [] for user in cursor: users.append(user) return", "AccessControlledModel.remove(self, group) def getMembers(self, group, offset=0, limit=50, sort=None): \"\"\" Return", "access level on the group itself to the user. Any", "as well. Group membership is stored in the database on", "self.name = 'group' self.ensureIndices(['lowerName']) self.ensureTextIndex({ 'name': 10, 'description': 1 })", "the user collection is sufficiently fast. Users with READ access", "AccessException from girder.constants import AccessType class Group(AccessControlledModel): \"\"\" Groups are", "a new group. 
The creator will be given admin access", "membership lists self.model('user').update({ 'groups': group['_id'] }, { '$pull': {'groups': group['_id']}", "dict :returns: The group document that was created. \"\"\" assert", "in compliance with the License. # You may obtain a", "ValidationException('A group with that name already' 'exists.', 'name') return doc", "all group access for this user on this group. self.setUserAccess(group,", "# Copyright 2013 Kitware Inc. # # Licensed under the", "The creator will be given admin access to it. :param", "{'$ne': doc['_id']} duplicates = self.find(q, limit=1, fields=['_id']) if duplicates.count() !=", "software # distributed under the License is distributed on an", "of querying all members within a group is much less", "to simplify access control for resources in the system, but", "search as. :param limit: Result set size limit. :param offset:", "able to see the group to join it self.setUserAccess(group, user,", "the find; we'll do access-based filtering of the result #", "Any group member has at least read access on the", "into the result set of users. :param limit: Result set", "user documents. \"\"\" q = { 'groups': group['_id'] } cursor", "given admin access to it. :param name: The name of", "group document that was created. \"\"\" assert type(public) is bool", "the name or description. 
Users with ADMIN access can delete", "raise AccessException('User was not invited to this group.') return group", "\"License\" ); # you may not use this file except", "doc['name'] = doc['name'].strip() doc['lowerName'] = doc['name'].lower() doc['description'] = doc['description'].strip() if", "in the group in the user document, and also grants", "the user read access to the group so that they", "from user group membership lists self.model('user').update({ 'groups': group['_id'] }, {", "'$pull': { 'access.groups': {'id': group['_id']} } } # Remove references", "break else: user['groupInvites'].append({ 'groupId': group['_id'], 'level': level }) return self.model('user').save(user,", "group can add and remove members and change the name", "\"\"\" # Remove group membership for this user. if 'groups'", "def inviteUser(self, group, user, level=AccessType.READ): \"\"\" Invite a user to", "list members on. :param offset: Offset into the result set", "# Remove references to this group from access-controlled collections. self.update(acQuery,", "is no \"users\" field in this model. This is to", "user: The user to search as. :param limit: Result set", "admin access over the group. self.addUser(group, creator, level=AccessType.ADMIN) return group", "stored in the database on the user document only; there", "for the find query. :returns: List of user documents. \"\"\"", "getMembers(self, group, offset=0, limit=50, sort=None): \"\"\" Return the list of", "group['_id'] } cursor = self.model('user').find( q, offset=offset, limit=limit, sort=sort) users", "level=AccessType.READ): \"\"\" Add the user to the group. Records membership", "\"\"\" Return the list of all users who belong to", "user: user['groups'] = [] if not group['_id'] in user['groups']: user['groups'].append(group['_id'])", "of this group and also grant them # admin access", "the system, but they can be used for other purposes", "group member has at least read access on the group.", "group is publicly visible. 
:type public: bool :param creator: User", "level=AccessType.READ, limit=limit, offset=offset): yield r def remove(self, group): \"\"\" Delete", "invite['groupId'] == group['_id']: invite['level'] = level break else: user['groupInvites'].append({ 'groupId':", "\"\"\" # User has to be able to see the", "validate and save the group self.save(group) # We make the", "return group def joinGroup(self, group, user): \"\"\" Call this when", "the folder. :type name: str :param description: Description for the", "'groups' in user and group['_id'] in user['groups']: user['groups'].remove(group['_id']) self.model('user').save(user, validate=False)", "user. Any group member has at least read access on", "offset=0, limit=50, sort=None): \"\"\" Return the list of all users", "text search of all groups. :param user: The user to", "duplicates = self.find(q, limit=1, fields=['_id']) if duplicates.count() != 0: raise", "this user. if 'groups' in user and group['_id'] in user['groups']:", "group from access-controlled collections. self.update(acQuery, acUpdate) self.model('collection').update(acQuery, acUpdate) self.model('folder').update(acQuery, acUpdate)", "already' 'exists.', 'name') return doc def list(self, user=None, limit=50, offset=0,", "not invited to this group.') return group def inviteUser(self, group,", "public: Whether the group is publicly visible. :type public: bool", "members on. :param offset: Offset into the result set of", "to it. :param name: The name of the folder. :type", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "make the creator a member of this group and also", "is to optimize for the most common use case for", "within a group is much less common and typically only", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "= doc['name'].strip() doc['lowerName'] = doc['name'].lower() doc['description'] = doc['description'].strip() if not", "limit: Result set size limit. :param offset: Offset into the", "automatically grants the user read access to the group so", "group['_id']} } } # Remove references to this group from", "girder.constants import AccessType class Group(AccessControlledModel): \"\"\" Groups are simply groups", "user, level=AccessType.READ): \"\"\" Invite a user to join the group.", "user, AccessType.READ, save=True) if group['_id'] in user.get('groups', []): raise ValidationException('User", "delete the document itself AccessControlledModel.remove(self, group) def getMembers(self, group, offset=0,", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "remove members and change the name or description. Users with", "of users. The primary use of grouping users is to", "to in writing, software # distributed under the License is", "search of all groups. :param user: The user to search", "doc['description'].strip() if not doc['name']: raise ValidationException('Group name must not be", "group) def getMembers(self, group, offset=0, limit=50, sort=None): \"\"\" Return the", "Remove the user from the group. \"\"\" # Remove group", "# See the License for the specific language governing permissions", ":param text: Pass this to perform a text search of", "limit=50, sort=None): \"\"\" Return the list of all users who", "group['_id']: invite['level'] = level break else: user['groupInvites'].append({ 'groupId': group['_id'], 'level':", "groups of users. The primary use of grouping users is", "an invitation. 
\"\"\" if not 'groupInvites' in user: user['groupInvites'] =", "\"\"\" def initialize(self): self.name = 'group' self.ensureIndices(['lowerName']) self.ensureTextIndex({ 'name': 10,", "coding: utf-8 -*- ############################################################################### # Copyright 2013 Kitware Inc. #", "# # Licensed under the Apache License, Version 2.0 (", "\"\"\" # Remove references to this group from user group", "group and its members. Users with WRITE access on the", "user document only; there is no \"users\" field in this", "Group membership is stored in the database on the user", "name or description. Users with ADMIN access can delete the", "[] for user in cursor: users.append(user) return users def addUser(self,", "[] for invite in user['groupInvites']: if invite['groupId'] == group['_id']: self.addUser(group,", "or agreed to in writing, software # distributed under the", "# Remove references to this group from user group membership", "The group document that was created. \"\"\" assert type(public) is", "they will be given the specified level of access. \"\"\"", "required by applicable law or agreed to in writing, software", "\"\"\" q = { 'groups': group['_id'] } cursor = self.model('user').find(", "membership for this user. if 'groups' in user and group['_id']", "now = datetime.datetime.now() group = { 'name': name, 'description': description,", "return self.model('user').save(user, validate=False) def removeUser(self, group, user): \"\"\" Remove the", "self.setUserAccess(group, user, level=None, save=True) return group def createGroup(self, name, creator,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# User has to be able to see the group", "with the License. # You may obtain a copy of", "to this group. :param group: The group to list members", "them automatically grants the user read access to the group", "results. :param sort: The sort direction. 
\"\"\" # Perform the", "r in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ, limit=limit, offset=offset): yield r def", "doc['name']: raise ValidationException('Group name must not be empty.', 'name') q", "as. :param limit: Result set size limit. :param offset: Offset", "group. \"\"\" if not 'groups' in user: user['groups'] = []", "if not doc['name']: raise ValidationException('Group name must not be empty.',", "user. The task of querying all members within a group", "offset: Offset into the result set of users. :param limit:", "creator a member of this group and also grant them", "name: The name of the folder. :type name: str :param", "the group self.save(group) # We make the creator a member", "common use case for querying membership, which involves checking access", "Perform the find; we'll do access-based filtering of the result", "to this group.') return group def inviteUser(self, group, user, level=AccessType.READ):", "compliance with the License. # You may obtain a copy", "size limit. :param sort: Sort parameter for the find query.", "agreed to in writing, software # distributed under the License", "'name') q = { 'lowerName': doc['lowerName'], } if '_id' in", "group access for this user on this group. self.setUserAccess(group, user,", "task of querying all members within a group is much", "not doc['name']: raise ValidationException('Group name must not be empty.', 'name')", "users who belong to this group. :param group: The group", "invite in user['groupInvites']: if invite['groupId'] == group['_id']: invite['level'] = level", "grouping users is to simplify access control for resources in", "be empty.', 'name') q = { 'lowerName': doc['lowerName'], } if", "to join it self.setUserAccess(group, user, AccessType.READ, save=True) if group['_id'] in", "specified level of access. 
\"\"\" # User has to be", "distributed under the License is distributed on an \"AS IS\"", "'access.groups.id': group['_id'] } acUpdate = { '$pull': { 'access.groups': {'id':", "{ '$pull': { 'access.groups': {'id': group['_id']} } } # Remove", "{ 'groups': group['_id'] } cursor = self.model('user').find( q, offset=offset, limit=limit,", "in user['groupInvites']: if invite['groupId'] == group['_id']: invite['level'] = level break", "group def joinGroup(self, group, user): \"\"\" Call this when the", "single group at a time, so doing a find on", ":param sort: Sort parameter for the find query. :returns: List", "a group is much less common and typically only performed", "group. self.setUserAccess(group, user, level=None, save=True) return group def createGroup(self, name,", "self.model('user').save(user, validate=False) # Remove all group access for this user", "r def remove(self, group): \"\"\" Delete a group, and all", "a group, and all references to it in the database.", "express or implied. # See the License for the specific", "# Now validate and save the group self.save(group) # We", "except in compliance with the License. # You may obtain", "} self.setPublic(group, public=public) # Now validate and save the group", "\"\"\" Create a new group. The creator will be given", "validate=False) self.setUserAccess(group, user, level, save=True) return group def joinGroup(self, group,", "# set afterward. cursor = self.find({}, limit=0, sort=sort) for r", "of all users who belong to this group. :param group:", "group. :param group: The group to list members on. :param", "not use this file except in compliance with the License.", "users.append(user) return users def addUser(self, group, user, level=AccessType.READ): \"\"\" Add", "10, 'description': 1 }) def validate(self, doc): doc['name'] = doc['name'].strip()", "members and change the name or description. Users with ADMIN", ":param sort: The sort direction. \"\"\" # Perform the find;", "can see the group and its members. 
Users with WRITE", "document to delete. :type group: dict \"\"\" # Remove references", "writing, software # distributed under the License is distributed on", "user=user, level=AccessType.READ, limit=limit, offset=offset): yield r def remove(self, group): \"\"\"", "and group['_id'] in user['groups']: user['groups'].remove(group['_id']) self.model('user').save(user, validate=False) # Remove all", "'description': 1 }) def validate(self, doc): doc['name'] = doc['name'].strip() doc['lowerName']", "you may not use this file except in compliance with", "control for resources in the system, but they can be", "def addUser(self, group, user, level=AccessType.READ): \"\"\" Add the user to", "the results. :param sort: The sort direction. \"\"\" # Perform", "require groupings of users as well. Group membership is stored", "group, and all references to it in the database. :param", "group['_id']} }) acQuery = { 'access.groups.id': group['_id'] } acUpdate =", "filtering of the result # set afterward. cursor = self.find({},", ".model_base import AccessControlledModel,\\ ValidationException,\\ AccessException from girder.constants import AccessType class", "itself AccessControlledModel.remove(self, group) def getMembers(self, group, offset=0, limit=50, sort=None): \"\"\"", "users as well. Group membership is stored in the database", "acUpdate) self.model('user').update(acQuery, acUpdate) # Finally, delete the document itself AccessControlledModel.remove(self,", "sort=None): \"\"\" Return the list of all users who belong", "of user documents. \"\"\" q = { 'groups': group['_id'] }", "member of this group and also grant them # admin", "level break else: user['groupInvites'].append({ 'groupId': group['_id'], 'level': level }) return", "the creator of the group. :type creator: dict :returns: The", "already in this group.') if not 'groupInvites' in user: user['groupInvites']", "limit: Result set size limit. :param sort: Sort parameter for", "grant them # admin access over the group. 
self.addUser(group, creator,", "CONDITIONS OF ANY KIND, either express or implied. # See", "involves checking access control policies, which is always done relative", "user to the group. Records membership in the group in", "Finally, delete the document itself AccessControlledModel.remove(self, group) def getMembers(self, group,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "return users def addUser(self, group, user, level=AccessType.READ): \"\"\" Add the", "admin access to it. :param name: The name of the", "group at a time, so doing a find on the", "list in the user collection is sufficiently fast. Users with", "level=None, save=True) return group def createGroup(self, name, creator, description='', public=True):", "list(self, user=None, limit=50, offset=0, sort=None): \"\"\" Search for groups or", "group: The group document to delete. :type group: dict \"\"\"", "description. Users with ADMIN access can delete the entire group.", "access-based filtering of the result # set afterward. cursor =", "access control for resources in the system, but they can", "and save the group self.save(group) # We make the creator", "that was created. \"\"\" assert type(public) is bool now =", "perform a text search of all groups. :param user: The", "# We make the creator a member of this group", "group itself to the user. 
Any group member has at", "user['groups']: user['groups'].append(group['_id']) self.model('user').save(user, validate=False) self.setUserAccess(group, user, level, save=True) return group", "'level': level }) return self.model('user').save(user, validate=False) def removeUser(self, group, user):", "see the group to join it self.setUserAccess(group, user, AccessType.READ, save=True)", "limit=limit, sort=sort) users = [] for user in cursor: users.append(user)", "users = [] for user in cursor: users.append(user) return users", "and typically only performed ona single group at a time,", "utf-8 -*- ############################################################################### # Copyright 2013 Kitware Inc. # #", "is bool now = datetime.datetime.now() group = { 'name': name,", "from .model_base import AccessControlledModel,\\ ValidationException,\\ AccessException from girder.constants import AccessType", "return group def inviteUser(self, group, user, level=AccessType.READ): \"\"\" Invite a", "other purposes that require groupings of users as well. Group", "doing a find on the indexed group list in the", "the user from the group. \"\"\" # Remove group membership", "group: dict \"\"\" # Remove references to this group from", "The name of the folder. :type name: str :param description:", "raise ValidationException('Group name must not be empty.', 'name') q =", "to the group so that they can see it. Once", "group['_id'] in user['groups']: user['groups'].remove(group['_id']) self.model('user').save(user, validate=False) # Remove all group", "user['groupInvites'] = [] for invite in user['groupInvites']: if invite['groupId'] ==", "python # -*- coding: utf-8 -*- ############################################################################### # Copyright 2013", "on the indexed group list in the user collection is", "of grouping users is to simplify access control for resources", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "q['_id'] = {'$ne': doc['_id']} duplicates = self.find(q, limit=1, fields=['_id']) if", "# Perform the find; we'll do access-based filtering of the", "all users who belong to this group. :param group: The", "set size limit. :param offset: Offset into the results. :param", "the group can see the group and its members. Users", "the group. \"\"\" # Remove group membership for this user.", "initialize(self): self.name = 'group' self.ensureIndices(['lowerName']) self.ensureTextIndex({ 'name': 10, 'description': 1", "the License is distributed on an \"AS IS\" BASIS, #", "createGroup(self, name, creator, description='', public=True): \"\"\" Create a new group.", "invite['level'] = level break else: user['groupInvites'].append({ 'groupId': group['_id'], 'level': level", "user group membership lists self.model('user').update({ 'groups': group['_id'] }, { '$pull':", "document representing the creator of the group. :type creator: dict", "in this model. This is to optimize for the most", "remove(self, group): \"\"\" Delete a group, and all references to", "self.model('user').find( q, offset=offset, limit=limit, sort=sort) users = [] for user", "= doc['description'].strip() if not doc['name']: raise ValidationException('Group name must not", "always done relative to a specific user. The task of", "group can see the group and its members. Users with", "[] for invite in user['groupInvites']: if invite['groupId'] == group['_id']: invite['level']", "name, 'description': description, 'created': now, 'updated': now } self.setPublic(group, public=public)", "group): \"\"\" Delete a group, and all references to it", "else: user['groupInvites'].append({ 'groupId': group['_id'], 'level': level }) return self.model('user').save(user, validate=False)", "name must not be empty.', 'name') q = { 'lowerName':", "} acUpdate = { '$pull': { 'access.groups': {'id': group['_id']} }", "str :param description: Description for the folder. 
:type description: str", "group list in the user collection is sufficiently fast. Users", "given the specified level of access. \"\"\" # User has", "the folder. :type description: str :param public: Whether the group", "assert type(public) is bool now = datetime.datetime.now() group = {", "created. \"\"\" assert type(public) is bool now = datetime.datetime.now() group", "the result # set afterward. cursor = self.find({}, limit=0, sort=sort)", "it self.setUserAccess(group, user, AccessType.READ, save=True) if group['_id'] in user.get('groups', []):", "system, but they can be used for other purposes that", "in the user collection is sufficiently fast. Users with READ", "self.ensureIndices(['lowerName']) self.ensureTextIndex({ 'name': 10, 'description': 1 }) def validate(self, doc):", "the group. :type creator: dict :returns: The group document that", "self.setUserAccess(group, user, AccessType.READ, save=True) if group['_id'] in user.get('groups', []): raise", "def remove(self, group): \"\"\" Delete a group, and all references", "law or agreed to in writing, software # distributed under", "case for querying membership, which involves checking access control policies,", "group def inviteUser(self, group, user, level=AccessType.READ): \"\"\" Invite a user", "user): \"\"\" Remove the user from the group. \"\"\" #", "size limit. :param offset: Offset into the results. :param sort:", "accept the invitation, they will be given the specified level", "accepts an invitation. \"\"\" if not 'groupInvites' in user: user['groupInvites']", "invited to this group.') return group def inviteUser(self, group, user,", "== group['_id']: invite['level'] = level break else: user['groupInvites'].append({ 'groupId': group['_id'],", "simply groups of users. 
The primary use of grouping users", "#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright", "'access.groups': {'id': group['_id']} } } # Remove references to this", "lists self.model('user').update({ 'groups': group['_id'] }, { '$pull': {'groups': group['_id']} })", "License, Version 2.0 ( the \"License\" ); # you may", "import AccessControlledModel,\\ ValidationException,\\ AccessException from girder.constants import AccessType class Group(AccessControlledModel):", "Offset into the results. :param sort: The sort direction. \"\"\"", "access. \"\"\" # User has to be able to see", ":param offset: Offset into the result set of users. :param", "user, level, save=True) return group def joinGroup(self, group, user): \"\"\"", "this model. This is to optimize for the most common", "\"users\" field in this model. This is to optimize for", "querying all members within a group is much less common", "limit=0, sort=sort) for r in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ, limit=limit, offset=offset):", "invite in user['groupInvites']: if invite['groupId'] == group['_id']: self.addUser(group, user, level=invite['level'])", "folder. :type name: str :param description: Description for the folder.", "The task of querying all members within a group is", "not 'groups' in user: user['groups'] = [] if not group['_id']", "== group['_id']: self.addUser(group, user, level=invite['level']) user['groupInvites'].remove(invite) self.model('user').save(user, validate=False) break else:", "}) return self.model('user').save(user, validate=False) def removeUser(self, group, user): \"\"\" Remove", "this when the user accepts an invitation. \"\"\" if not", "2013 Kitware Inc. # # Licensed under the Apache License,", "the group. \"\"\" if not 'groups' in user: user['groups'] =", "group. The creator will be given admin access to it.", "users. :param limit: Result set size limit. 
:param sort: Sort", "may obtain a copy of the License at # #", "Search for groups or simply list all visible groups. :param", "user['groupInvites']: if invite['groupId'] == group['_id']: self.addUser(group, user, level=invite['level']) user['groupInvites'].remove(invite) self.model('user').save(user,", "new group. The creator will be given admin access to", "a text search of all groups. :param user: The user", "when the user accepts an invitation. \"\"\" if not 'groupInvites'", "user and group['_id'] in user['groups']: user['groups'].remove(group['_id']) self.model('user').save(user, validate=False) # Remove", "the indexed group list in the user collection is sufficiently", "user, level=AccessType.READ): \"\"\" Add the user to the group. Records", "can be used for other purposes that require groupings of", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Inc. # # Licensed under the Apache License, Version 2.0", "'_id' in doc: q['_id'] = {'$ne': doc['_id']} duplicates = self.find(q,", "the Apache License, Version 2.0 ( the \"License\" ); #", "for the most common use case for querying membership, which", "else: raise AccessException('User was not invited to this group.') return", "direction. \"\"\" # Perform the find; we'll do access-based filtering", "Licensed under the Apache License, Version 2.0 ( the \"License\"", "Once they accept the invitation, they will be given the", "offset=offset, limit=limit, sort=sort) users = [] for user in cursor:", "be given the specified level of access. \"\"\" # User", "sort: The sort direction. \"\"\" # Perform the find; we'll", "may not use this file except in compliance with the", "name: str :param description: Description for the folder. :type description:", "references to this group from access-controlled collections. self.update(acQuery, acUpdate) self.model('collection').update(acQuery,", "parameter for the find query. 
:returns: List of user documents.", "for user in cursor: users.append(user) return users def addUser(self, group,", "cursor = self.find({}, limit=0, sort=sort) for r in self.filterResultsByPermission(cursor=cursor, user=user,", ":type description: str :param public: Whether the group is publicly", "group in the user document, and also grants the specified", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "in user: user['groupInvites'] = [] for invite in user['groupInvites']: if", "this file except in compliance with the License. # You", "documents. \"\"\" q = { 'groups': group['_id'] } cursor =", "!= 0: raise ValidationException('A group with that name already' 'exists.',", "yield r def remove(self, group): \"\"\" Delete a group, and", "creator, description='', public=True): \"\"\" Create a new group. The creator", "to a specific user. The task of querying all members", "of the result # set afterward. cursor = self.find({}, limit=0,", "# Licensed under the Apache License, Version 2.0 ( the", "if invite['groupId'] == group['_id']: self.addUser(group, user, level=invite['level']) user['groupInvites'].remove(invite) self.model('user').save(user, validate=False)", "level=invite['level']) user['groupInvites'].remove(invite) self.model('user').save(user, validate=False) break else: raise AccessException('User was not", "to optimize for the most common use case for querying", "belong to this group. :param group: The group to list", "this group and also grant them # admin access over", "q, offset=offset, limit=limit, sort=sort) users = [] for user in", "the group in the user document, and also grants the", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "groupings of users as well. Group membership is stored in", "change the name or description. Users with ADMIN access can", "set size limit. :param sort: Sort parameter for the find", "level of access. 
\"\"\" # User has to be able", "}) acQuery = { 'access.groups.id': group['_id'] } acUpdate = {", "well. Group membership is stored in the database on the", "'name': name, 'description': description, 'created': now, 'updated': now } self.setPublic(group,", "self.ensureTextIndex({ 'name': 10, 'description': 1 }) def validate(self, doc): doc['name']", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "to it in the database. :param group: The group document", "optimize for the most common use case for querying membership,", "access to it. :param name: The name of the folder.", "delete the entire group. \"\"\" def initialize(self): self.name = 'group'", "= [] if not group['_id'] in user['groups']: user['groups'].append(group['_id']) self.model('user').save(user, validate=False)", "Records membership in the group in the user document, and", "acUpdate) self.model('collection').update(acQuery, acUpdate) self.model('folder').update(acQuery, acUpdate) self.model('user').update(acQuery, acUpdate) # Finally, delete", "user on this group. self.setUserAccess(group, user, level=None, save=True) return group", ":param creator: User document representing the creator of the group.", "limit. :param offset: Offset into the results. :param sort: The", "addUser(self, group, user, level=AccessType.READ): \"\"\" Add the user to the", "doc: q['_id'] = {'$ne': doc['_id']} duplicates = self.find(q, limit=1, fields=['_id'])", "the find query. :returns: List of user documents. \"\"\" q", "Inviting them automatically grants the user read access to the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "group with that name already' 'exists.', 'name') return doc def", "= self.model('user').find( q, offset=offset, limit=limit, sort=sort) users = [] for", "user read access to the group so that they can", "Result set size limit. :param offset: Offset into the results.", "all groups. 
:param user: The user to search as. :param", "much less common and typically only performed ona single group", "delete. :type group: dict \"\"\" # Remove references to this", "query. :returns: List of user documents. \"\"\" q = {", "if duplicates.count() != 0: raise ValidationException('A group with that name", "empty.', 'name') q = { 'lowerName': doc['lowerName'], } if '_id'", "limit=1, fields=['_id']) if duplicates.count() != 0: raise ValidationException('A group with", "them # admin access over the group. self.addUser(group, creator, level=AccessType.ADMIN)", "ValidationException('User is already in this group.') if not 'groupInvites' in", "for groups or simply list all visible groups. :param text:", "Call this when the user accepts an invitation. \"\"\" if", "User has to be able to see the group to", ":param group: The group document to delete. :type group: dict", "membership is stored in the database on the user document", "group.') if not 'groupInvites' in user: user['groupInvites'] = [] for", "access control policies, which is always done relative to a", "or simply list all visible groups. :param text: Pass this", "datetime from .model_base import AccessControlledModel,\\ ValidationException,\\ AccessException from girder.constants import", "the entire group. \"\"\" def initialize(self): self.name = 'group' self.ensureIndices(['lowerName'])", "The group to list members on. :param offset: Offset into", "read access on the group. \"\"\" if not 'groups' in", "level=AccessType.READ): \"\"\" Invite a user to join the group. Inviting", "bool now = datetime.datetime.now() group = { 'name': name, 'description':", "the License. ############################################################################### import datetime from .model_base import AccessControlledModel,\\ ValidationException,\\", ":param public: Whether the group is publicly visible. 
:type public:", "in cursor: users.append(user) return users def addUser(self, group, user, level=AccessType.READ):", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "group, user, level=AccessType.READ): \"\"\" Invite a user to join the", "doc['_id']} duplicates = self.find(q, limit=1, fields=['_id']) if duplicates.count() != 0:", "if '_id' in doc: q['_id'] = {'$ne': doc['_id']} duplicates =", "this group. self.setUserAccess(group, user, level=None, save=True) return group def createGroup(self,", "group membership for this user. if 'groups' in user and", "specific user. The task of querying all members within a", "2.0 ( the \"License\" ); # you may not use", "limit=50, offset=0, sort=None): \"\"\" Search for groups or simply list", "this group from access-controlled collections. self.update(acQuery, acUpdate) self.model('collection').update(acQuery, acUpdate) self.model('folder').update(acQuery,", "user to join the group. Inviting them automatically grants the", "sufficiently fast. Users with READ access on the group can", "is sufficiently fast. Users with READ access on the group", "primary use of grouping users is to simplify access control", "to this group from user group membership lists self.model('user').update({ 'groups':", "user to search as. :param limit: Result set size limit.", "to the group. Records membership in the group in the", "on the group can see the group and its members.", "the most common use case for querying membership, which involves", "purposes that require groupings of users as well. Group membership", "or implied. 
# See the License for the specific language", "= { 'groups': group['_id'] } cursor = self.model('user').find( q, offset=offset,", "not group['_id'] in user['groups']: user['groups'].append(group['_id']) self.model('user').save(user, validate=False) self.setUserAccess(group, user, level,", "this group.') return group def inviteUser(self, group, user, level=AccessType.READ): \"\"\"", "str :param public: Whether the group is publicly visible. :type", "for this user on this group. self.setUserAccess(group, user, level=None, save=True)", "under the License. ############################################################################### import datetime from .model_base import AccessControlledModel,\\", "ona single group at a time, so doing a find", "is to simplify access control for resources in the system,", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "field in this model. This is to optimize for the", "doc def list(self, user=None, limit=50, offset=0, sort=None): \"\"\" Search for", "ValidationException,\\ AccessException from girder.constants import AccessType class Group(AccessControlledModel): \"\"\" Groups", "= self.find({}, limit=0, sort=sort) for r in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ,", "to see the group to join it self.setUserAccess(group, user, AccessType.READ,", "a user to join the group. Inviting them automatically grants", "if group['_id'] in user.get('groups', []): raise ValidationException('User is already in", "in user.get('groups', []): raise ValidationException('User is already in this group.')", "policies, which is always done relative to a specific user.", "sort direction. 
\"\"\" # Perform the find; we'll do access-based", "simplify access control for resources in the system, but they", "document itself AccessControlledModel.remove(self, group) def getMembers(self, group, offset=0, limit=50, sort=None):", ":param offset: Offset into the results. :param sort: The sort", "for invite in user['groupInvites']: if invite['groupId'] == group['_id']: invite['level'] =", "\"\"\" Invite a user to join the group. Inviting them", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "and remove members and change the name or description. Users", "if not group['_id'] in user['groups']: user['groups'].append(group['_id']) self.model('user').save(user, validate=False) self.setUserAccess(group, user,", "not 'groupInvites' in user: user['groupInvites'] = [] for invite in", "\"\"\" assert type(public) is bool now = datetime.datetime.now() group =", "do access-based filtering of the result # set afterward. cursor", "collection is sufficiently fast. Users with READ access on the", "its members. Users with WRITE access on the group can", "We make the creator a member of this group and", "1 }) def validate(self, doc): doc['name'] = doc['name'].strip() doc['lowerName'] =", "a find on the indexed group list in the user", "doc['lowerName'], } if '_id' in doc: q['_id'] = {'$ne': doc['_id']}", "be given admin access to it. :param name: The name", "at least read access on the group. 
\"\"\" if not", "fields=['_id']) if duplicates.count() != 0: raise ValidationException('A group with that", "user: user['groupInvites'] = [] for invite in user['groupInvites']: if invite['groupId']", "READ access on the group can see the group and", "save=True) return group def joinGroup(self, group, user): \"\"\" Call this", "# you may not use this file except in compliance", "-*- coding: utf-8 -*- ############################################################################### # Copyright 2013 Kitware Inc.", "self.setUserAccess(group, user, level, save=True) return group def joinGroup(self, group, user):", "def joinGroup(self, group, user): \"\"\" Call this when the user", "sort: Sort parameter for the find query. :returns: List of", "in user and group['_id'] in user['groups']: user['groups'].remove(group['_id']) self.model('user').save(user, validate=False) #", "has at least read access on the group. \"\"\" if", "joinGroup(self, group, user): \"\"\" Call this when the user accepts", "'name') return doc def list(self, user=None, limit=50, offset=0, sort=None): \"\"\"", "acUpdate) # Finally, delete the document itself AccessControlledModel.remove(self, group) def", ":type group: dict \"\"\" # Remove references to this group", "and all references to it in the database. :param group:", "Users with WRITE access on the group can add and", ":param limit: Result set size limit. :param sort: Sort parameter", "was created. \"\"\" assert type(public) is bool now = datetime.datetime.now()", "validate=False) # Remove all group access for this user on", "self.addUser(group, user, level=invite['level']) user['groupInvites'].remove(invite) self.model('user').save(user, validate=False) break else: raise AccessException('User", "was not invited to this group.') return group def inviteUser(self,", "the result set of users. :param limit: Result set size", "# admin access over the group. 
self.addUser(group, creator, level=AccessType.ADMIN) return", "= level break else: user['groupInvites'].append({ 'groupId': group['_id'], 'level': level })", "user from the group. \"\"\" # Remove group membership for", "doc['name'].strip() doc['lowerName'] = doc['name'].lower() doc['description'] = doc['description'].strip() if not doc['name']:", "bool :param creator: User document representing the creator of the", "user collection is sufficiently fast. Users with READ access on", "= { 'name': name, 'description': description, 'created': now, 'updated': now", "can see it. Once they accept the invitation, they will", "all references to it in the database. :param group: The", "# # Unless required by applicable law or agreed to", "limit=limit, offset=offset): yield r def remove(self, group): \"\"\" Delete a", "} } # Remove references to this group from access-controlled", "def createGroup(self, name, creator, description='', public=True): \"\"\" Create a new", "= doc['name'].lower() doc['description'] = doc['description'].strip() if not doc['name']: raise ValidationException('Group", "= {'$ne': doc['_id']} duplicates = self.find(q, limit=1, fields=['_id']) if duplicates.count()", "on this group. self.setUserAccess(group, user, level=None, save=True) return group def", "are simply groups of users. The primary use of grouping", "group document to delete. :type group: dict \"\"\" # Remove", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "the user accepts an invitation. \"\"\" if not 'groupInvites' in", "Apache License, Version 2.0 ( the \"License\" ); # you", "groups or simply list all visible groups. :param text: Pass", ":param user: The user to search as. :param limit: Result", "'lowerName': doc['lowerName'], } if '_id' in doc: q['_id'] = {'$ne':", "description: Description for the folder. 
:type description: str :param public:", "invite['groupId'] == group['_id']: self.addUser(group, user, level=invite['level']) user['groupInvites'].remove(invite) self.model('user').save(user, validate=False) break", "def initialize(self): self.name = 'group' self.ensureIndices(['lowerName']) self.ensureTextIndex({ 'name': 10, 'description':", "q = { 'groups': group['_id'] } cursor = self.model('user').find( q,", "and also grant them # admin access over the group.", "to be able to see the group to join it", "acUpdate) self.model('folder').update(acQuery, acUpdate) self.model('user').update(acQuery, acUpdate) # Finally, delete the document", "there is no \"users\" field in this model. This is", "} cursor = self.model('user').find( q, offset=offset, limit=limit, sort=sort) users =", "datetime.datetime.now() group = { 'name': name, 'description': description, 'created': now,", "read access to the group so that they can see", "find query. :returns: List of user documents. \"\"\" q =", "'groupId': group['_id'], 'level': level }) return self.model('user').save(user, validate=False) def removeUser(self,", "import AccessType class Group(AccessControlledModel): \"\"\" Groups are simply groups of", "group. \"\"\" # Remove group membership for this user. if", "governing permissions and # limitations under the License. ############################################################################### import", "to join the group. Inviting them automatically grants the user", "indexed group list in the user collection is sufficiently fast.", "set afterward. cursor = self.find({}, limit=0, sort=sort) for r in", "invitation, they will be given the specified level of access.", ":returns: List of user documents. \"\"\" q = { 'groups':", "-*- ############################################################################### # Copyright 2013 Kitware Inc. 
# # Licensed", "membership, which involves checking access control policies, which is always", "level }) return self.model('user').save(user, validate=False) def removeUser(self, group, user): \"\"\"", "implied. # See the License for the specific language governing", "Description for the folder. :type description: str :param public: Whether", "database. :param group: The group document to delete. :type group:", "group['_id'] in user.get('groups', []): raise ValidationException('User is already in this", "offset=offset): yield r def remove(self, group): \"\"\" Delete a group,", "WRITE access on the group can add and remove members", "this group. :param group: The group to list members on.", "group['_id'], 'level': level }) return self.model('user').save(user, validate=False) def removeUser(self, group,", ":type public: bool :param creator: User document representing the creator", "= datetime.datetime.now() group = { 'name': name, 'description': description, 'created':", "who belong to this group. :param group: The group to", "= { 'access.groups.id': group['_id'] } acUpdate = { '$pull': {", "cursor = self.model('user').find( q, offset=offset, limit=limit, sort=sort) users = []", "doc['description'] = doc['description'].strip() if not doc['name']: raise ValidationException('Group name must", "most common use case for querying membership, which involves checking", "user['groups'].append(group['_id']) self.model('user').save(user, validate=False) self.setUserAccess(group, user, level, save=True) return group def", "Group(AccessControlledModel): \"\"\" Groups are simply groups of users. The primary", "with ADMIN access can delete the entire group. \"\"\" def", "by applicable law or agreed to in writing, software #", "to this group from access-controlled collections. self.update(acQuery, acUpdate) self.model('collection').update(acQuery, acUpdate)", "Sort parameter for the find query. 
:returns: List of user", "user['groupInvites'].remove(invite) self.model('user').save(user, validate=False) break else: raise AccessException('User was not invited", "folder. :type description: str :param public: Whether the group is", "that require groupings of users as well. Group membership is", "Return the list of all users who belong to this", "'created': now, 'updated': now } self.setPublic(group, public=public) # Now validate", "{'groups': group['_id']} }) acQuery = { 'access.groups.id': group['_id'] } acUpdate", "# Finally, delete the document itself AccessControlledModel.remove(self, group) def getMembers(self,", "for resources in the system, but they can be used", "the group itself to the user. Any group member has", "user['groups'].remove(group['_id']) self.model('user').save(user, validate=False) # Remove all group access for this", "of users as well. Group membership is stored in the", "Add the user to the group. Records membership in the", "membership in the group in the user document, and also", "in user['groups']: user['groups'].remove(group['_id']) self.model('user').save(user, validate=False) # Remove all group access", "raise ValidationException('A group with that name already' 'exists.', 'name') return", "into the results. :param sort: The sort direction. \"\"\" #", "raise ValidationException('User is already in this group.') if not 'groupInvites'", "public=True): \"\"\" Create a new group. The creator will be", "AccessType class Group(AccessControlledModel): \"\"\" Groups are simply groups of users.", "used for other purposes that require groupings of users as", "Users with READ access on the group can see the", "level on the group itself to the user. Any group", "offset: Offset into the results. 
:param sort: The sort direction.", "} if '_id' in doc: q['_id'] = {'$ne': doc['_id']} duplicates", "the user document only; there is no \"users\" field in", "\"\"\" Delete a group, and all references to it in", "in user['groupInvites']: if invite['groupId'] == group['_id']: self.addUser(group, user, level=invite['level']) user['groupInvites'].remove(invite)", "user['groupInvites']: if invite['groupId'] == group['_id']: invite['level'] = level break else:", "break else: raise AccessException('User was not invited to this group.')", "in the system, but they can be used for other", "but they can be used for other purposes that require", "in the user document, and also grants the specified access", "members within a group is much less common and typically", "permissions and # limitations under the License. ############################################################################### import datetime", "in this group.') if not 'groupInvites' in user: user['groupInvites'] =", "return group def createGroup(self, name, creator, description='', public=True): \"\"\" Create", "the specified level of access. \"\"\" # User has to", "group to list members on. :param offset: Offset into the", "# limitations under the License. ############################################################################### import datetime from .model_base", "dict \"\"\" # Remove references to this group from user", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "'name': 10, 'description': 1 }) def validate(self, doc): doc['name'] =", "Unless required by applicable law or agreed to in writing,", ":param description: Description for the folder. 
:type description: str :param", "validate=False) def removeUser(self, group, user): \"\"\" Remove the user from", "{ '$pull': {'groups': group['_id']} }) acQuery = { 'access.groups.id': group['_id']", "with READ access on the group can see the group", "self.model('folder').update(acQuery, acUpdate) self.model('user').update(acQuery, acUpdate) # Finally, delete the document itself", "'groups': group['_id'] } cursor = self.model('user').find( q, offset=offset, limit=limit, sort=sort)", "user['groups']: user['groups'].remove(group['_id']) self.model('user').save(user, validate=False) # Remove all group access for", "def removeUser(self, group, user): \"\"\" Remove the user from the", "Version 2.0 ( the \"License\" ); # you may not", "groups. :param text: Pass this to perform a text search", "the specific language governing permissions and # limitations under the", "group. Inviting them automatically grants the user read access to", ":param group: The group to list members on. :param offset:", "the group. Records membership in the group in the user", "Invite a user to join the group. Inviting them automatically", "############################################################################### # Copyright 2013 Kitware Inc. # # Licensed under", "self.find({}, limit=0, sort=sort) for r in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ, limit=limit,", "now, 'updated': now } self.setPublic(group, public=public) # Now validate and", "applicable law or agreed to in writing, software # distributed", "creator will be given admin access to it. 
:param name:", "group def createGroup(self, name, creator, description='', public=True): \"\"\" Create a", "control policies, which is always done relative to a specific", "type(public) is bool now = datetime.datetime.now() group = { 'name':", "duplicates.count() != 0: raise ValidationException('A group with that name already'", ":type creator: dict :returns: The group document that was created.", "document, and also grants the specified access level on the", "inviteUser(self, group, user, level=AccessType.READ): \"\"\" Invite a user to join", "############################################################################### import datetime from .model_base import AccessControlledModel,\\ ValidationException,\\ AccessException from", "on the group can add and remove members and change", "also grant them # admin access over the group. self.addUser(group,", "Groups are simply groups of users. The primary use of", "simply list all visible groups. :param text: Pass this to", "this group from user group membership lists self.model('user').update({ 'groups': group['_id']", "of users. :param limit: Result set size limit. :param sort:", "in writing, software # distributed under the License is distributed", "with that name already' 'exists.', 'name') return doc def list(self,", "= [] for invite in user['groupInvites']: if invite['groupId'] == group['_id']:", "ADMIN access can delete the entire group. \"\"\" def initialize(self):", "user['groupInvites'].append({ 'groupId': group['_id'], 'level': level }) return self.model('user').save(user, validate=False) def", ":type name: str :param description: Description for the folder. :type", "performed ona single group at a time, so doing a", "description: str :param public: Whether the group is publicly visible.", "limitations under the License. 
############################################################################### import datetime from .model_base import", "[] if not group['_id'] in user['groups']: user['groups'].append(group['_id']) self.model('user').save(user, validate=False) self.setUserAccess(group,", "access on the group can see the group and its", "the group can add and remove members and change the", "self.find(q, limit=1, fields=['_id']) if duplicates.count() != 0: raise ValidationException('A group", "result # set afterward. cursor = self.find({}, limit=0, sort=sort) for", "Create a new group. The creator will be given admin", "); # you may not use this file except in", "database on the user document only; there is no \"users\"", "the user. Any group member has at least read access", "'group' self.ensureIndices(['lowerName']) self.ensureTextIndex({ 'name': 10, 'description': 1 }) def validate(self,", "group membership lists self.model('user').update({ 'groups': group['_id'] }, { '$pull': {'groups':", "under the Apache License, Version 2.0 ( the \"License\" );", "fast. Users with READ access on the group can see", "group, offset=0, limit=50, sort=None): \"\"\" Return the list of all", "on the group. \"\"\" if not 'groups' in user: user['groups']", "is always done relative to a specific user. The task", "self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ, limit=limit, offset=offset): yield r def remove(self, group):", "at a time, so doing a find on the indexed", "it. :param name: The name of the folder. :type name:", "Now validate and save the group self.save(group) # We make", "= 'group' self.ensureIndices(['lowerName']) self.ensureTextIndex({ 'name': 10, 'description': 1 }) def", "AccessException('User was not invited to this group.') return group def", "from access-controlled collections. 
self.update(acQuery, acUpdate) self.model('collection').update(acQuery, acUpdate) self.model('folder').update(acQuery, acUpdate) self.model('user').update(acQuery,", "group from user group membership lists self.model('user').update({ 'groups': group['_id'] },", "q = { 'lowerName': doc['lowerName'], } if '_id' in doc:", "group to join it self.setUserAccess(group, user, AccessType.READ, save=True) if group['_id']", "access on the group can add and remove members and", "'exists.', 'name') return doc def list(self, user=None, limit=50, offset=0, sort=None):", "\"\"\" Call this when the user accepts an invitation. \"\"\"", "limit. :param sort: Sort parameter for the find query. :returns:", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "so that they can see it. Once they accept the", "ValidationException('Group name must not be empty.', 'name') q = {", "User document representing the creator of the group. :type creator:", "user, level=None, save=True) return group def createGroup(self, name, creator, description='',", "for this user. if 'groups' in user and group['_id'] in", "self.save(group) # We make the creator a member of this", ":param name: The name of the folder. :type name: str", "# You may obtain a copy of the License at", "Users with ADMIN access can delete the entire group. \"\"\"", "find; we'll do access-based filtering of the result # set", "save the group self.save(group) # We make the creator a", "can add and remove members and change the name or", "the database on the user document only; there is no", "with WRITE access on the group can add and remove", "\"\"\" Search for groups or simply list all visible groups.", "the group and its members. Users with WRITE access on", "= { 'lowerName': doc['lowerName'], } if '_id' in doc: q['_id']", "the user to the group. 
Records membership in the group", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "level, save=True) return group def joinGroup(self, group, user): \"\"\" Call", "document that was created. \"\"\" assert type(public) is bool now", "group['_id'] in user['groups']: user['groups'].append(group['_id']) self.model('user').save(user, validate=False) self.setUserAccess(group, user, level, save=True)", "in user: user['groups'] = [] if not group['_id'] in user['groups']:", "user. if 'groups' in user and group['_id'] in user['groups']: user['groups'].remove(group['_id'])", "grants the user read access to the group so that", "has to be able to see the group to join", "this group.') if not 'groupInvites' in user: user['groupInvites'] = []", "of the group. :type creator: dict :returns: The group document", "def validate(self, doc): doc['name'] = doc['name'].strip() doc['lowerName'] = doc['name'].lower() doc['description']", "( the \"License\" ); # you may not use this", "Result set size limit. :param sort: Sort parameter for the", "group, user): \"\"\" Remove the user from the group. \"\"\"", "all visible groups. :param text: Pass this to perform a", "the group so that they can see it. Once they", "group['_id'] }, { '$pull': {'groups': group['_id']} }) acQuery = {", "so doing a find on the indexed group list in", "will be given the specified level of access. \"\"\" #", "common and typically only performed ona single group at a", "or description. Users with ADMIN access can delete the entire", "sort=sort) for r in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ, limit=limit, offset=offset): yield", "list of all users who belong to this group. :param", "access for this user on this group. self.setUserAccess(group, user, level=None,", "name, creator, description='', public=True): \"\"\" Create a new group. 
The", "all members within a group is much less common and", "the License for the specific language governing permissions and #", "only performed ona single group at a time, so doing", ":param limit: Result set size limit. :param offset: Offset into", "users def addUser(self, group, user, level=AccessType.READ): \"\"\" Add the user", "the group is publicly visible. :type public: bool :param creator:", "of the folder. :type name: str :param description: Description for", "# Remove group membership for this user. if 'groups' in", "visible groups. :param text: Pass this to perform a text", "either express or implied. # See the License for the", "if not 'groupInvites' in user: user['groupInvites'] = [] for invite", "'updated': now } self.setPublic(group, public=public) # Now validate and save", "in the database on the user document only; there is", "The primary use of grouping users is to simplify access", "{ 'name': name, 'description': description, 'created': now, 'updated': now }", "that name already' 'exists.', 'name') return doc def list(self, user=None,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "it in the database. :param group: The group document to", "self.model('collection').update(acQuery, acUpdate) self.model('folder').update(acQuery, acUpdate) self.model('user').update(acQuery, acUpdate) # Finally, delete the", "for querying membership, which involves checking access control policies, which", "description, 'created': now, 'updated': now } self.setPublic(group, public=public) # Now", "group self.save(group) # We make the creator a member of", "doc['name'].lower() doc['description'] = doc['description'].strip() if not doc['name']: raise ValidationException('Group name", "Kitware Inc. # # Licensed under the Apache License, Version", "user in cursor: users.append(user) return users def addUser(self, group, user,", "class Group(AccessControlledModel): \"\"\" Groups are simply groups of users. The", "the database. 
:param group: The group document to delete. :type", "users. The primary use of grouping users is to simplify", "and # limitations under the License. ############################################################################### import datetime from", "doc['lowerName'] = doc['name'].lower() doc['description'] = doc['description'].strip() if not doc['name']: raise", "validate(self, doc): doc['name'] = doc['name'].strip() doc['lowerName'] = doc['name'].lower() doc['description'] =", "grants the specified access level on the group itself to", "user, level=invite['level']) user['groupInvites'].remove(invite) self.model('user').save(user, validate=False) break else: raise AccessException('User was", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "= self.find(q, limit=1, fields=['_id']) if duplicates.count() != 0: raise ValidationException('A", "for invite in user['groupInvites']: if invite['groupId'] == group['_id']: self.addUser(group, user,", "less common and typically only performed ona single group at", "no \"users\" field in this model. This is to optimize", "public=public) # Now validate and save the group self.save(group) #", "The user to search as. :param limit: Result set size", "Offset into the result set of users. :param limit: Result", "also grants the specified access level on the group itself", "Pass this to perform a text search of all groups.", "the \"License\" ); # you may not use this file", "member has at least read access on the group. 
\"\"\"", "self.update(acQuery, acUpdate) self.model('collection').update(acQuery, acUpdate) self.model('folder').update(acQuery, acUpdate) self.model('user').update(acQuery, acUpdate) # Finally,", "user): \"\"\" Call this when the user accepts an invitation.", "typically only performed ona single group at a time, so", "must not be empty.', 'name') q = { 'lowerName': doc['lowerName'],", "return doc def list(self, user=None, limit=50, offset=0, sort=None): \"\"\" Search", "can delete the entire group. \"\"\" def initialize(self): self.name =", "resources in the system, but they can be used for", "\"\"\" Add the user to the group. Records membership in", "Remove all group access for this user on this group.", "we'll do access-based filtering of the result # set afterward.", "to search as. :param limit: Result set size limit. :param", "creator of the group. :type creator: dict :returns: The group", "which is always done relative to a specific user. The", "Remove references to this group from user group membership lists", "this user on this group. self.setUserAccess(group, user, level=None, save=True) return", "to perform a text search of all groups. :param user:", "save=True) return group def createGroup(self, name, creator, description='', public=True): \"\"\"", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "to the user. Any group member has at least read", "user.get('groups', []): raise ValidationException('User is already in this group.') if", "in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ, limit=limit, offset=offset): yield r def remove(self,", "is already in this group.') if not 'groupInvites' in user:", "publicly visible. 
:type public: bool :param creator: User document representing", "now } self.setPublic(group, public=public) # Now validate and save the", "checking access control policies, which is always done relative to", "for r in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ, limit=limit, offset=offset): yield r", "The group document to delete. :type group: dict \"\"\" #", "Remove group membership for this user. if 'groups' in user", "they can be used for other purposes that require groupings", "if 'groups' in user and group['_id'] in user['groups']: user['groups'].remove(group['_id']) self.model('user').save(user,", "# distributed under the License is distributed on an \"AS", "of all groups. :param user: The user to search as.", "from the group. \"\"\" # Remove group membership for this", "AccessControlledModel,\\ ValidationException,\\ AccessException from girder.constants import AccessType class Group(AccessControlledModel): \"\"\"", "# Unless required by applicable law or agreed to in", "use case for querying membership, which involves checking access control", "\"\"\" if not 'groups' in user: user['groups'] = [] if", "collections. self.update(acQuery, acUpdate) self.model('collection').update(acQuery, acUpdate) self.model('folder').update(acQuery, acUpdate) self.model('user').update(acQuery, acUpdate) #", "} # Remove references to this group from access-controlled collections.", "'groups': group['_id'] }, { '$pull': {'groups': group['_id']} }) acQuery =", "join it self.setUserAccess(group, user, AccessType.READ, save=True) if group['_id'] in user.get('groups',", "document only; there is no \"users\" field in this model.", "offset=0, sort=None): \"\"\" Search for groups or simply list all", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "Remove references to this group from access-controlled collections. self.update(acQuery, acUpdate)", "for the folder. 
:type description: str :param public: Whether the", "is stored in the database on the user document only;", "a member of this group and also grant them #", "result set of users. :param limit: Result set size limit.", "relative to a specific user. The task of querying all", "name of the folder. :type name: str :param description: Description", "\"\"\" # Perform the find; we'll do access-based filtering of", "self.model('user').save(user, validate=False) break else: raise AccessException('User was not invited to", "the group. Inviting them automatically grants the user read access", "You may obtain a copy of the License at #", "afterward. cursor = self.find({}, limit=0, sort=sort) for r in self.filterResultsByPermission(cursor=cursor,", "description='', public=True): \"\"\" Create a new group. The creator will", "they can see it. Once they accept the invitation, they", ":returns: The group document that was created. \"\"\" assert type(public)", "the creator a member of this group and also grant", "[]): raise ValidationException('User is already in this group.') if not", "group: The group to list members on. :param offset: Offset", "self.model('user').save(user, validate=False) self.setUserAccess(group, user, level, save=True) return group def joinGroup(self,", "find on the indexed group list in the user collection", "'description': description, 'created': now, 'updated': now } self.setPublic(group, public=public) #", "the user document, and also grants the specified access level", "members. Users with WRITE access on the group can add", "that they can see it. Once they accept the invitation,", "{'id': group['_id']} } } # Remove references to this group", "entire group. \"\"\" def initialize(self): self.name = 'group' self.ensureIndices(['lowerName']) self.ensureTextIndex({", "List of user documents. \"\"\" q = { 'groups': group['_id']", "not be empty.', 'name') q = { 'lowerName': doc['lowerName'], }", "group so that they can see it. 
Once they accept", "for other purposes that require groupings of users as well.", "group['_id'] } acUpdate = { '$pull': { 'access.groups': {'id': group['_id']}", "self.model('user').save(user, validate=False) def removeUser(self, group, user): \"\"\" Remove the user", "public: bool :param creator: User document representing the creator of", "This is to optimize for the most common use case", "group. :type creator: dict :returns: The group document that was", "# -*- coding: utf-8 -*- ############################################################################### # Copyright 2013 Kitware", "to delete. :type group: dict \"\"\" # Remove references to", "on. :param offset: Offset into the result set of users.", "in user['groups']: user['groups'].append(group['_id']) self.model('user').save(user, validate=False) self.setUserAccess(group, user, level, save=True) return" ]
[ "= subproc.communicate() return cmd_stdout.rstrip() def pull_image(name): log.info('Pulling image: %s' %", "set -ex mkdir -p /etc/puppet cp -a /tmp/puppet-etc/* /etc/puppet rm", "% config_volume) log.info('puppet_tags %s' % puppet_tags) log.info('manifest %s' % manifest)", "log.debug(cmd_stderr) process_count = int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count())) log.info('Running docker-puppet') config_file = os.environ.get('CONFIG',", "should be the same for a # given group of", "prefix).split(\"/\")[0] break return config_volume def get_config_hash(prefix, config_volume): hashfile = os.path.join(prefix,", "cmd_stderr = subproc.communicate() return cmd_stdout.rstrip() def pull_image(name): log.info('Pulling image: %s'", "time we support configuring 'shared' services at the same #", "= config.get('volumes', []) config_volume=None for v in volumes: if v.startswith(prefix):", "pm in process_map] success = True for returncode, config_volume in", "puppet tags and manifest. log.info(\"Existing service, appending puppet tags and", "puppet_tags) log.info('manifest %s' % manifest) log.info('config_image %s' % config_image) log.info('volumes", "os.environ.get(k) if os.environ.get('NET_HOST', 'false') == 'true': log.debug('NET_HOST enabled') dcmd.extend(['--net', 'host',", "volumes) sh_script = '/var/lib/docker-puppet/docker-puppet.sh' with open(sh_script, 'w') as script_file: os.chmod(script_file.name,", "log.info('Diffing container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],", "list of arguments for the above function # to consume.", "then archivedirs=(\"/etc\" \"/root\" \"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\" \"/var/www\") rsync_srcs=\"\" for d in", "in compliance with the License. 
You may obtain # a", "we generated above config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') log.debug('CONFIG_VOLUME_PREFIX: %s' %", "Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for", "4 else [] if not manifest or not config_image: continue", "subprocess.Popen(['hostname', '-s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() return cmd_stdout.rstrip()", "cmd_stderr != 'Error response from daemon: ' \\ 'No such", "# We key off of config volume for all configs.", "= service[4] if len(service) > 4 else [] if not", "\"${archivedirs[@]}\"; do if [ -d \"$d\" ]; then rsync_srcs+=\" $d\"", "%s' % startup_configs) infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for infile in infiles:", "man_file.write(manifest) rm_container('docker-puppet-%s' % config_volume) pull_image(config_image) dcmd = ['/usr/bin/docker', 'run', '--user',", "at the same # time. For example configuring all of", "stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr:", "os.environ.get('SHOW_DIFF', None): log.info('Diffing container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker',", "on the system. 
p = multiprocessing.Pool(process_count) returncodes = list(p.map(mp_puppet_config, process_map))", "\"/root\" \"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\" \"/var/www\") rsync_srcs=\"\" for d in \"${archivedirs[@]}\"; do", "/usr/bin/puppet apply --verbose $TAGS /etc/config.pp # Disables archiving if [", "'host', '--volume', '/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image) log.debug('Running docker command: %s' % '", "# Holds all the information for each process to consume.", "'--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', # script", "= False # Update the startup configs with the config", "command: %s' % ' '.join(dcmd)) subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,", "os.path.join(os.path.dirname(infile), \"hashed-\" + os.path.basename(infile)) with open(outfile, 'w') as out_f: json.dump(infile_data,", "process_map] success = True for returncode, config_volume in zip(returncodes, config_volumes):", "log.info('Service compilation completed.') def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)): log.debug('config_volume", "subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) log.info('Removing container: %s'", "'root', '--name', 'docker-puppet-%s' % config_volume, '--env', 'PUPPET_TAGS=%s' % puppet_tags, '--env',", "os.path.relpath( v.split(\":\")[0], prefix).split(\"/\")[0] break return config_volume def get_config_hash(prefix, config_volume): hashfile", "if cmd_stdout: log.error(cmd_stdout) if cmd_stderr: log.error(cmd_stderr) else: if cmd_stdout: log.debug(cmd_stdout)", "if cmd_stderr: log.error(cmd_stderr) else: if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: 
log.debug(cmd_stderr)", "\\ / /var/lib/config-data/puppet-generated/${NAME} # Write a checksum of the config-data", "volume in volumes: if volume: dcmd.extend(['--volume', volume]) dcmd.extend(['--entrypoint', sh_script]) env", "= '%s\\n%s' % (configs[config_volume][2], manifest) if configs[config_volume][3] != config_image: log.warn(\"Config", "-Rf /etc/puppet/ssl # not in use and causes permission errors", "= os.path.join(prefix, \"%s.md5sum\" % config_volume) hash_data = None if os.path.isfile(hashfile):", "do not match even though\" \" shared volumes are the", "-a -R -0 --delay-updates --delete-after \\ --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts", "log.info('Removing container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],", "config_image) log.debug('volumes %s' % volumes) sh_script = '/var/lib/docker-puppet/docker-puppet.sh' with open(sh_script,", "services. We are also now specifying the container # in", "above function # to consume. process_map = [] for config_volume", "ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s') ch.setFormatter(formatter) log.addHandler(ch) # this", "[] for config_volume in configs: service = configs[config_volume] puppet_tags =", "service = [ service.get('config_volume'), service.get('puppet_tags'), service.get('step_config'), service.get('config_image'), service.get('volumes', []), ]", "of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings # that can", "and causes permission errors echo \"{\\\\\"step\\\\\": $STEP}\" > /etc/puppet/hieradata/docker.json TAGS=\"\"", "to in writing, software # distributed under the License is", "this should be the same for a # given group", "this is to match what we do in deployed-server def", "= '%s,%s' % (configs[config_volume][1], puppet_tags) if manifest: configs[config_volume][2] = '%s\\n%s'", "touch /etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME FACTER_uuid=docker 
/usr/bin/puppet apply --verbose $TAGS /etc/config.pp #", "'--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''), '--env', 'STEP=%s' % os.environ.get('STEP', '6'),", "we merge shared settings together here. # # We key", "or agreed to in writing, software # distributed under the", "%s' % config_volume) success = False # Update the startup", "import tempfile import multiprocessing log = logging.getLogger() ch = logging.StreamHandler(sys.stdout)", "import sys import subprocess import sys import tempfile import multiprocessing", "mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)): log.debug('config_volume %s' % config_volume) log.debug('puppet_tags", "log.debug('config_volume %s' % config_volume) log.debug('puppet_tags %s' % puppet_tags) log.debug('manifest %s'", "Apache License, Version 2.0 (the \"License\"); you may # not", "errors echo \"{\\\\\"step\\\\\": $STEP}\" > /etc/puppet/hieradata/docker.json TAGS=\"\" if [ -n", "# Instead of starting them all linearly we run them", "off of config_volume as this should be the same for", "= subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) log.info('Removing container:", "= logging.getLogger() ch = logging.StreamHandler(sys.stdout) if os.environ.get('DEBUG', False): log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG)", "%s' % config_volume_prefix) startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN: %s' %", "%s' % volumes) sh_script = '/var/lib/docker-puppet/docker-puppet.sh' with open(sh_script, 'w') as", "%s' % config_file) with open(config_file) as f: json_data = json.load(f)", "= ['/usr/bin/docker', 'run', '--user', 'root', '--name', 'docker-puppet-%s' % config_volume, '--env',", "License, Version 2.0 (the \"License\"); you may # not use", "% short_hostname(), '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''), '--env', 
'STEP=%s' %", "else: log.setLevel(logging.INFO) ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s') ch.setFormatter(formatter) log.addHandler(ch)", "as e.g \"novacomute\" consumes config-data/nova volumes = config.get('volumes', []) config_volume=None", "as f: infile_data = json.load(f) for k, v in infile_data.iteritems():", "config changes tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum", "log.info('puppet_tags %s' % puppet_tags) log.info('manifest %s' % manifest) log.info('config_image %s'", "the mounted config volume - we can't just use the", "not use this file except in compliance with the License.", "'--env', 'PUPPET_TAGS=%s' % puppet_tags, '--env', 'NAME=%s' % config_volume, '--env', 'HOSTNAME=%s'", "e.g \"novacomute\" consumes config-data/nova volumes = config.get('volumes', []) config_volume=None for", "CPUs on the system. p = multiprocessing.Pool(process_count) returncodes = list(p.map(mp_puppet_config,", "this is used as a # salt to trigger container", "make a copy of files modified during puppet run #", "dcmd.extend(['--volume', volume]) dcmd.extend(['--entrypoint', sh_script]) env = {} # NOTE(flaper87): Always", "stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr", "volumes = service[4] if len(service) > 4 else [] if", "-print0) \\ / /var/lib/config-data/puppet-generated/${NAME} # Write a checksum of the", "run ad-hoc puppet modules # inside of a container. 
import", "manifest, config_image, [volumes]] settings # that can be used to", "volumes: if volume: dcmd.extend(['--volume', volume]) dcmd.extend(['--entrypoint', sh_script]) env = {}", "config_volume = service[0] or '' puppet_tags = service[1] or ''", "service is None: continue if isinstance(service, dict): service = [", "'--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/', # OpenSSL", "% puppet_tags, '--env', 'NAME=%s' % config_volume, '--env', 'HOSTNAME=%s' % short_hostname(),", "env outfile = os.path.join(os.path.dirname(infile), \"hashed-\" + os.path.basename(infile)) with open(outfile, 'w')", "when the config changes tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01'", "os.path.join(prefix, \"%s.md5sum\" % config_volume) hash_data = None if os.path.isfile(hashfile): with", "as man_file: man_file.write('include ::tripleo::packages\\n') man_file.write(manifest) rm_container('docker-puppet-%s' % config_volume) pull_image(config_image) dcmd", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "them using a process # pool. This creates a list", "container # in which the services should be configured. 
This", "to run puppet inside of the given docker container image.", "\"License\"); you may # not use this file except in", "For example configuring all of the heat services # in", "%(message)s') ch.setFormatter(formatter) log.addHandler(ch) # this is to match what we", "log.debug('Running docker command: %s' % ' '.join(dcmd)) subproc = subprocess.Popen(dcmd,", "% os.environ.get('STEP', '6'), '--volume', '%s:/etc/config.pp:ro' % tmp_man.name, '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', '--volume',", "injection '--volume', '%s:%s:rw' % (sh_script, sh_script) ] for volume in", "rm_container('docker-puppet-%s' % config_volume) return subproc.returncode # Holds all the information", "log.debug('- %s' % p) # Fire off processes to perform", "to trigger container restart when the config changes tar -c", "% manifest) log.info('config_image %s' % config_image) log.info('volumes %s' % volumes)", "for config_volume in configs: service = configs[config_volume] puppet_tags = service[1]", "None): log.info('Diffing container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'diff',", "/var/lib/config-data/puppet-generated/${NAME} rsync -a -R -0 --delay-updates --delete-after \\ --files-from=<(find $rsync_srcs", "rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME} # Also make", "k: k.startswith('DOCKER'), os.environ.keys()): env[k] = os.environ.get(k) if os.environ.get('NET_HOST', 'false') ==", "sys import subprocess import sys import tempfile import multiprocessing log", "subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr =", "% name) subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout,", "in configs: # Append puppet tags and manifest. 
log.info(\"Existing service,", "= [pm[0] for pm in process_map] success = True for", "= json.load(f) # To save time we support configuring 'shared'", "the same. configs = {} for service in (json_data or", "env = {} # NOTE(flaper87): Always copy the DOCKER_* environment", "manifest\") if puppet_tags: configs[config_volume][1] = '%s,%s' % (configs[config_volume][1], puppet_tags) if", "| md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum fi \"\"\")", "or '' volumes = service[4] if len(service) > 4 else", "service.get('volumes', []), ] config_volume = service[0] or '' puppet_tags =", "the startup configs with the config hash we generated above", "useful for debugging mkdir -p /var/lib/config-data/puppet-generated/${NAME} rsync -a -R -0", "cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr and \\", "4 else [] if puppet_tags: puppet_tags = \"file,file_line,concat,augeas,%s\" % puppet_tags", "# only delete successful runs, for debugging rm_container('docker-puppet-%s' % config_volume)", "awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum fi \"\"\") with tempfile.NamedTemporaryFile() as", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "import os import sys import subprocess import sys import tempfile", "# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source", "= int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count())) log.info('Running docker-puppet') config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG:", "container. import glob import json import logging import os import", "of arguments for the above function # to consume. 
process_map", "'run', '--user', 'root', '--name', 'docker-puppet-%s' % config_volume, '--env', 'PUPPET_TAGS=%s' %", "(configs[config_volume][1], puppet_tags) if manifest: configs[config_volume][2] = '%s\\n%s' % (configs[config_volume][2], manifest)", "Version 2.0 (the \"License\"); you may # not use this", "'--user', 'root', '--name', 'docker-puppet-%s' % config_volume, '--env', 'PUPPET_TAGS=%s' % puppet_tags,", "[ -d \"$d\" ]; then rsync_srcs+=\" $d\" fi done rsync", "returncodes = list(p.map(mp_puppet_config, process_map)) config_volumes = [pm[0] for pm in", "causes permission errors echo \"{\\\\\"step\\\\\": $STEP}\" > /etc/puppet/hieradata/docker.json TAGS=\"\" if", "a single container pass makes sense and will save some", "$TAGS /etc/config.pp # Disables archiving if [ -z \"$NO_ARCHIVE\" ];", "len(service) > 4 else [] if not manifest or not", "puppet_tags: configs[config_volume][1] = '%s,%s' % (configs[config_volume][1], puppet_tags) if manifest: configs[config_volume][2]", "ch = logging.StreamHandler(sys.stdout) if os.environ.get('DEBUG', False): log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) else: log.setLevel(logging.INFO)", "this we merge shared settings together here. # # We", "during puppet run # This is useful for debugging mkdir", "[config_volume, puppet_tags, manifest, config_image, [volumes]] settings # that can be", "if os.environ.get('SHOW_DIFF', None): log.info('Diffing container: %s' % name) subproc =", "a # salt to trigger container restart when the config", "= f.read().rstrip() return hash_data def rm_container(name): if os.environ.get('SHOW_DIFF', None): log.info('Diffing", "for all configs. if config_volume in configs: # Append puppet", "name is also the same. 
configs = {} for service", "with open(outfile, 'w') as out_f: json.dump(infile_data, out_f) if not success:", "puppet_tags, manifest, config_image, volumes)): log.debug('config_volume %s' % config_volume) log.debug('puppet_tags %s'", "log.debug(cmd_stderr) # only delete successful runs, for debugging rm_container('docker-puppet-%s' %", "at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON # array", "We are also now specifying the container # in which", "Disables archiving if [ -z \"$NO_ARCHIVE\" ]; then archivedirs=(\"/etc\" \"/root\"", "if os.path.isfile(hashfile): with open(hashfile) as f: hash_data = f.read().rstrip() return", "if returncode != 0: log.error('ERROR configuring %s' % config_volume) success", "-c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print", "'--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/', # OpenSSL trusted CA injection '--volume',", "permissions and limitations # under the License. # Shell script", "logging.StreamHandler(sys.stdout) if os.environ.get('DEBUG', False): log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) else: log.setLevel(logging.INFO) ch.setLevel(logging.INFO) formatter", "'/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG: %s' % config_file) with open(config_file) as f: json_data", "container: {}\\n'.format(name): log.debug(cmd_stderr) process_count = int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count())) log.info('Running docker-puppet') config_file", "config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON", "completed.') def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)): log.debug('config_volume %s' %", "False # Update the startup configs with the config hash", "pull_image(name): log.info('Pulling image: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'pull',", "compliance with the License. 
You may obtain # a copy", "config_hash: env = v.get('environment', []) env.append(\"TRIPLEO_CONFIG_HASH=%s\" % config_hash) log.debug(\"Updating config", "is useful for debugging mkdir -p /var/lib/config-data/puppet-generated/${NAME} rsync -a -R", "is None: continue if isinstance(service, dict): service = [ service.get('config_volume'),", "though\" \" shared volumes are the same!\") else: log.info(\"Adding new", "-p /etc/ssh touch /etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS", "and will save some time. # To support this we", "-p /var/lib/config-data/puppet-generated/${NAME} rsync -a -R -0 --delay-updates --delete-after \\ --files-from=<(find", "used to generate config files or run ad-hoc puppet modules", "log.warn(\"Config containers do not match even though\" \" shared volumes", "short_hostname(), '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''), '--env', 'STEP=%s' % os.environ.get('STEP',", "as tmp_man: with open(tmp_man.name, 'w') as man_file: man_file.write('include ::tripleo::packages\\n') man_file.write(manifest)", "0: log.error('ERROR configuring %s' % config_volume) success = False #", "# # Unless required by applicable law or agreed to", "/etc/puppet/ssl # not in use and causes permission errors echo", "configuring 'shared' services at the same # time. For example", "each configuration. Defaults # to the number of CPUs on", "\"hashed-\" + os.path.basename(infile)) with open(outfile, 'w') as out_f: json.dump(infile_data, out_f)", "if [ -z \"$NO_ARCHIVE\" ]; then archivedirs=(\"/etc\" \"/root\" \"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\"", "puppet_tags, manifest, config_image, volumes]) for p in process_map: log.debug('- %s'", "configuration. 
Defaults # to the number of CPUs on the", "is to match what we do in deployed-server def short_hostname():", "open(tmp_man.name, 'w') as man_file: man_file.write('include ::tripleo::packages\\n') man_file.write(manifest) rm_container('docker-puppet-%s' % config_volume)", "os.environ.get('STEP', '6'), '--volume', '%s:/etc/config.pp:ro' % tmp_man.name, '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',", "[]) config_volume=None for v in volumes: if v.startswith(prefix): config_volume =", "> 4 else [] if not manifest or not config_image:", "'/var/lib/docker-puppet/docker-puppet.sh' with open(sh_script, 'w') as script_file: os.chmod(script_file.name, 0755) script_file.write(\"\"\"#!/bin/bash set", "environment variables as # they contain the access data for", "off of config volume for all configs. if config_volume in", "config volume for all configs. if config_volume in configs: #", "import logging import os import sys import subprocess import sys", "the DOCKER_* environment variables as # they contain the access", "success = False # Update the startup configs with the", "cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) def", "startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs) infiles =", "workaround LP1696283 mkdir -p /etc/ssh touch /etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet", "modified during puppet run # This is useful for debugging", "if [ -n \"$PUPPET_TAGS\" ]; then TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\" fi #", "'pull', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout:", "'/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume', 
'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', # script injection '--volume', '%s:%s:rw' % (sh_script,", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "or not config_image: continue log.info('config_volume %s' % config_volume) log.info('puppet_tags %s'", "they contain the access data for the docker daemon. for", "fi # workaround LP1696283 mkdir -p /etc/ssh touch /etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME", "% puppet_tags) log.debug('manifest %s' % manifest) log.debug('config_image %s' % config_image)", "os.environ.get('NO_ARCHIVE', ''), '--env', 'STEP=%s' % os.environ.get('STEP', '6'), '--volume', '%s:/etc/config.pp:ro' %", "are also now specifying the container # in which the", "rm -Rf /etc/puppet/ssl # not in use and causes permission", "cmd_stderr: log.debug(cmd_stderr) log.info('Removing container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker',", "# Update the startup configs with the config hash we", "may obtain # a copy of the License at #", "success = True for returncode, config_volume in zip(returncodes, config_volumes): if", "Unless required by applicable law or agreed to in writing,", "sense and will save some time. 
# To support this", "mounted config volume - we can't just use the #", "json.load(f) # To save time we support configuring 'shared' services", "get_config_hash(prefix, config_volume): hashfile = os.path.join(prefix, \"%s.md5sum\" % config_volume) hash_data =", "subprocess import sys import tempfile import multiprocessing log = logging.getLogger()", "[volumes]] settings # that can be used to generate config", "= [] for config_volume in configs: service = configs[config_volume] puppet_tags", "# Also make a copy of files modified during puppet", "env = v.get('environment', []) env.append(\"TRIPLEO_CONFIG_HASH=%s\" % config_hash) log.debug(\"Updating config hash", "for debugging rm_container('docker-puppet-%s' % config_volume) return subproc.returncode # Holds all", "in (json_data or []): if service is None: continue if", "process_map = [] for config_volume in configs: service = configs[config_volume]", "# Disables archiving if [ -z \"$NO_ARCHIVE\" ]; then archivedirs=(\"/etc\"", "not config_image: continue log.info('config_volume %s' % config_volume) log.info('puppet_tags %s' %", "% volumes) # We key off of config volume for", "= os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG: %s' % config_file) with open(config_file) as", "] config_volume = service[0] or '' puppet_tags = service[1] or", "tags and manifest. log.info(\"Existing service, appending puppet tags and manifest\")", "Defaults # to the number of CPUs on the system.", "log.info('Running docker-puppet') config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG: %s' % config_file)", "% (configs[config_volume][1], puppet_tags) if manifest: configs[config_volume][2] = '%s\\n%s' % (configs[config_volume][2],", "'rm', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout:", "in a single container pass makes sense and will save", "either express or implied. 
See the # License for the", "rm_container('docker-puppet-%s' % config_volume) pull_image(config_image) dcmd = ['/usr/bin/docker', 'run', '--user', 'root',", "log.debug(cmd_stdout) if cmd_stderr and \\ cmd_stderr != 'Error response from", "linearly we run them using a process # pool. This", "may # not use this file except in compliance with", "manifest) if configs[config_volume][3] != config_image: log.warn(\"Config containers do not match", "apply --verbose $TAGS /etc/config.pp # Disables archiving if [ -z", "dict): service = [ service.get('config_volume'), service.get('puppet_tags'), service.get('step_config'), service.get('config_image'), service.get('volumes', []),", "log.addHandler(ch) # this is to match what we do in", "md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum fi \"\"\") with", "os.path.basename(infile)) with open(outfile, 'w') as out_f: json.dump(infile_data, out_f) if not", "config_volume) log.debug('puppet_tags %s' % puppet_tags) log.debug('manifest %s' % manifest) log.debug('config_image", "get_config_hash(config_volume_prefix, config_volume) if config_hash: env = v.get('environment', []) env.append(\"TRIPLEO_CONFIG_HASH=%s\" %", "for volume in volumes: if volume: dcmd.extend(['--volume', volume]) dcmd.extend(['--entrypoint', sh_script])", "docker daemon. for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()): env[k]", "if cmd_stderr: log.debug(cmd_stderr) def match_config_volume(prefix, config): # Match the mounted", "mkdir -p /etc/puppet cp -a /tmp/puppet-etc/* /etc/puppet rm -Rf /etc/puppet/ssl", "k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()): env[k] = os.environ.get(k) if", "--delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME} # Also make a copy of", "the config hash we generated above config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data')", "some time. 
# To support this we merge shared settings", "= os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs) infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json')", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "# Match the mounted config volume - we can't just", "only delete successful runs, for debugging rm_container('docker-puppet-%s' % config_volume) return", "stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() return cmd_stdout.rstrip() def pull_image(name): log.info('Pulling", "delete successful runs, for debugging rm_container('docker-puppet-%s' % config_volume) return subproc.returncode", "config_volume) log.info('puppet_tags %s' % puppet_tags) log.info('manifest %s' % manifest) log.info('config_image", "] for volume in volumes: if volume: dcmd.extend(['--volume', volume]) dcmd.extend(['--entrypoint',", "the config-data dir, this is used as a # salt", "% config_image) log.debug('volumes %s' % volumes) sh_script = '/var/lib/docker-puppet/docker-puppet.sh' with", "config_image, volumes]) for p in process_map: log.debug('- %s' % p)", "% os.environ.get('NO_ARCHIVE', ''), '--env', 'STEP=%s' % os.environ.get('STEP', '6'), '--volume', '%s:/etc/config.pp:ro'", "access data for the docker daemon. for k in filter(lambda", "log.info('config_image %s' % config_image) log.info('volumes %s' % volumes) # We", "= service log.info('Service compilation completed.') def mp_puppet_config((config_volume, puppet_tags, manifest, config_image,", "the information for each process to consume. 
# Instead of", "= subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr and \\ cmd_stderr", "% (configs[config_volume][2], manifest) if configs[config_volume][3] != config_image: log.warn(\"Config containers do", "copy the DOCKER_* environment variables as # they contain the", "filter(lambda k: k.startswith('DOCKER'), os.environ.keys()): env[k] = os.environ.get(k) if os.environ.get('NET_HOST', 'false')", "We key off of config_volume as this should be the", "%s' % puppet_tags) log.info('manifest %s' % manifest) log.info('config_image %s' %", "to the number of CPUs on the system. p =", "'w') as script_file: os.chmod(script_file.name, 0755) script_file.write(\"\"\"#!/bin/bash set -ex mkdir -p", "% puppet_tags else: puppet_tags = \"file,file_line,concat,augeas\" process_map.append([config_volume, puppet_tags, manifest, config_image,", "sh_script]) env = {} # NOTE(flaper87): Always copy the DOCKER_*", "service\") configs[config_volume] = service log.info('Service compilation completed.') def mp_puppet_config((config_volume, puppet_tags,", "'--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', # script injection '--volume', '%s:%s:rw' % (sh_script, sh_script)", "volumes) # We key off of config volume for all", "'' config_image = service[3] or '' volumes = service[4] if", "'%s,%s' % (configs[config_volume][1], puppet_tags) if manifest: configs[config_volume][2] = '%s\\n%s' %", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON #", "with open(infile) as f: infile_data = json.load(f) for k, v", "mkdir -p /var/lib/config-data/puppet-generated/${NAME} rsync -a -R -0 --delay-updates --delete-after \\", "# pool. This creates a list of arguments for the", "configs[config_volume] = service log.info('Service compilation completed.') def mp_puppet_config((config_volume, puppet_tags, manifest,", "manifest. 
log.info(\"Existing service, appending puppet tags and manifest\") if puppet_tags:", "the same!\") else: log.info(\"Adding new service\") configs[config_volume] = service log.info('Service", "cmd_stdout.rstrip() def pull_image(name): log.info('Pulling image: %s' % name) subproc =", "(json_data or []): if service is None: continue if isinstance(service,", "# OpenSSL trusted CA injection '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume',", "%s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "successful runs, for debugging rm_container('docker-puppet-%s' % config_volume) return subproc.returncode #", "docker container image. # Uses the config file at /var/lib/docker-puppet/docker-puppet.json", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "startup configs with the config hash we generated above config_volume_prefix", "log.debug('CONFIG: %s' % config_file) with open(config_file) as f: json_data =", "instances where the volume name is also the same. configs", "configs: service = configs[config_volume] puppet_tags = service[1] or '' manifest", "Match the mounted config volume - we can't just use", "'true': log.debug('NET_HOST enabled') dcmd.extend(['--net', 'host', '--volume', '/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image) log.debug('Running docker", "cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr and", "% name) subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout,", "in process_map: log.debug('- %s' % p) # Fire off processes", "rm_container(name): if os.environ.get('SHOW_DIFF', None): log.info('Diffing container: %s' % name) subproc", "of services. 
We are also now specifying the container #", "for service in (json_data or []): if service is None:", "/etc/puppet cp -a /tmp/puppet-etc/* /etc/puppet rm -Rf /etc/puppet/ssl # not", "cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) log.info('Removing", "% config_volume) pull_image(config_image) dcmd = ['/usr/bin/docker', 'run', '--user', 'root', '--name',", "-a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME} # Also make a", "% config_volume) return subproc.returncode # Holds all the information for", "config_volume in configs: service = configs[config_volume] puppet_tags = service[1] or", "them all linearly we run them using a process #", "'Error response from daemon: ' \\ 'No such container: {}\\n'.format(name):", "Holds all the information for each process to consume. #", "= glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for infile in infiles: with open(infile) as f:", "f: infile_data = json.load(f) for k, v in infile_data.iteritems(): config_volume", "%s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "else: if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) # only delete", "now specifying the container # in which the services should", "the specific language governing permissions and limitations # under the", "Instead of starting them all linearly we run them using", "os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix) startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json')", "under the Apache License, Version 2.0 (the \"License\"); you may", "p = multiprocessing.Pool(process_count) returncodes = list(p.map(mp_puppet_config, process_map)) config_volumes = [pm[0]", "'/etc/puppet/:/tmp/puppet-etc/:ro', 
'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/', # OpenSSL trusted", "= configs[config_volume] puppet_tags = service[1] or '' manifest = service[2]", "variables as # they contain the access data for the", "to perform each configuration. Defaults # to the number of", "inside of a container. import glob import json import logging", "the number of CPUs on the system. p = multiprocessing.Pool(process_count)", "puppet_tags) if manifest: configs[config_volume][2] = '%s\\n%s' % (configs[config_volume][2], manifest) if", "log.debug(\"Updating config hash for %s, config_volume=%s hash=%s\" % (k, config_volume,", "def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)): log.debug('config_volume %s' % config_volume)", "% config_file) with open(config_file) as f: json_data = json.load(f) #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "os.environ.keys()): env[k] = os.environ.get(k) if os.environ.get('NET_HOST', 'false') == 'true': log.debug('NET_HOST", "process to consume. # Instead of starting them all linearly", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "% config_image) log.info('volumes %s' % volumes) # We key off", "if len(service) > 4 else [] if not manifest or", "# in which the services should be configured. 
This should", "--verbose $TAGS /etc/config.pp # Disables archiving if [ -z \"$NO_ARCHIVE\"", "required by applicable law or agreed to in writing, software", "in volumes: if volume: dcmd.extend(['--volume', volume]) dcmd.extend(['--entrypoint', sh_script]) env =", "json_data = json.load(f) # To save time we support configuring", "]; then TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\" fi # workaround LP1696283 mkdir -p", "log.info('Pulling image: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],", "for infile in infiles: with open(infile) as f: infile_data =", "for a JSON # array of [config_volume, puppet_tags, manifest, config_image,", "tmp_man: with open(tmp_man.name, 'w') as man_file: man_file.write('include ::tripleo::packages\\n') man_file.write(manifest) rm_container('docker-puppet-%s'", "% (sh_script, sh_script) ] for volume in volumes: if volume:", "agreed to in writing, software # distributed under the License", "%s' % config_volume) log.info('puppet_tags %s' % puppet_tags) log.info('manifest %s' %", "services should be configured. This should match # in all", "distributed under the License is distributed on an \"AS IS\"", "subprocess.Popen(dcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) cmd_stdout, cmd_stderr = subproc.communicate() if subproc.returncode", "puppet run # This is useful for debugging mkdir -p", "CONDITIONS OF ANY KIND, either express or implied. 
See the", "config_volume) pull_image(config_image) dcmd = ['/usr/bin/docker', 'run', '--user', 'root', '--name', 'docker-puppet-%s'", "logging.getLogger() ch = logging.StreamHandler(sys.stdout) if os.environ.get('DEBUG', False): log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) else:", "config_volumes): if returncode != 0: log.error('ERROR configuring %s' % config_volume)", "generate config files or run ad-hoc puppet modules # inside", "[ service.get('config_volume'), service.get('puppet_tags'), service.get('step_config'), service.get('config_image'), service.get('volumes', []), ] config_volume =", "changes tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum |", "for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()): env[k] = os.environ.get(k)", "the volume name is also the same. configs = {}", "config_volume = match_config_volume(config_volume_prefix, v) if config_volume: config_hash = get_config_hash(config_volume_prefix, config_volume)", "if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) # only delete successful", "subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) cmd_stdout, cmd_stderr = subproc.communicate()", "if os.environ.get('DEBUG', False): log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) else: log.setLevel(logging.INFO) ch.setLevel(logging.INFO) formatter =", "checksum of the config-data dir, this is used as a", "'/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/', # OpenSSL trusted CA injection", "(sh_script, sh_script) ] for volume in volumes: if volume: dcmd.extend(['--volume',", "if cmd_stderr: log.debug(cmd_stderr) # only delete successful runs, for debugging", "all instances where the volume name is also the same.", "else: puppet_tags = \"file,file_line,concat,augeas\" process_map.append([config_volume, 
puppet_tags, manifest, config_image, volumes]) for", "config_volume): hashfile = os.path.join(prefix, \"%s.md5sum\" % config_volume) hash_data = None", "--files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \\ / /var/lib/config-data/puppet-generated/${NAME} # Write", "\"novacomute\" consumes config-data/nova volumes = config.get('volumes', []) config_volume=None for v", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "# not in use and causes permission errors echo \"{\\\\\"step\\\\\":", "save time we support configuring 'shared' services at the same", "\\ --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \\ / /var/lib/config-data/puppet-generated/${NAME} #", "See the # License for the specific language governing permissions", "0: log.error('Failed running docker-puppet.py for %s' % config_volume) if cmd_stdout:", "# inside of a container. import glob import json import", "for pm in process_map] success = True for returncode, config_volume", "outfile = os.path.join(os.path.dirname(infile), \"hashed-\" + os.path.basename(infile)) with open(outfile, 'w') as", "of the config-data dir, this is used as a #", "= service[4] if len(service) > 4 else [] if puppet_tags:", "= os.path.join(os.path.dirname(infile), \"hashed-\" + os.path.basename(infile)) with open(outfile, 'w') as out_f:", "To save time we support configuring 'shared' services at the", "json import logging import os import sys import subprocess import", "% volumes) sh_script = '/var/lib/docker-puppet/docker-puppet.sh' with open(sh_script, 'w') as script_file:", "# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings #", "just use the # key as e.g \"novacomute\" consumes config-data/nova", "glob import json import logging import os import sys import", "law or agreed to in writing, software # distributed under", "is also the same. 
configs = {} for service in", "cmd_stderr: log.debug(cmd_stderr) def match_config_volume(prefix, config): # Match the mounted config", "volumes are the same!\") else: log.info(\"Adding new service\") configs[config_volume] =", "# We key off of config_volume as this should be", "if not manifest or not config_image: continue log.info('config_volume %s' %", "\"{\\\\\"step\\\\\": $STEP}\" > /etc/puppet/hieradata/docker.json TAGS=\"\" if [ -n \"$PUPPET_TAGS\" ];", "which the services should be configured. This should match #", "def rm_container(name): if os.environ.get('SHOW_DIFF', None): log.info('Diffing container: %s' % name)", "hash we generated above config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') log.debug('CONFIG_VOLUME_PREFIX: %s'", "limitations # under the License. # Shell script tool to", "'.join(dcmd)) subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) cmd_stdout, cmd_stderr =", "in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()): env[k] = os.environ.get(k) if os.environ.get('NET_HOST',", "/var/lib/config-data/puppet-generated/${NAME} # Write a checksum of the config-data dir, this", "= match_config_volume(config_volume_prefix, v) if config_volume: config_hash = get_config_hash(config_volume_prefix, config_volume) if", "information for each process to consume. # Instead of starting", "config_volume in configs: # Append puppet tags and manifest. log.info(\"Existing", "glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for infile in infiles: with open(infile) as f: infile_data", "Shell script tool to run puppet inside of the given", "be configured. 
This should match # in all instances where", "match_config_volume(prefix, config): # Match the mounted config volume - we", "]; then archivedirs=(\"/etc\" \"/root\" \"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\" \"/var/www\") rsync_srcs=\"\" for d", "'' volumes = service[4] if len(service) > 4 else []", "def get_config_hash(prefix, config_volume): hashfile = os.path.join(prefix, \"%s.md5sum\" % config_volume) hash_data", "for %s' % config_volume) if cmd_stdout: log.error(cmd_stdout) if cmd_stderr: log.error(cmd_stderr)", "if cmd_stderr: log.debug(cmd_stderr) log.info('Removing container: %s' % name) subproc =", "to generate config files or run ad-hoc puppet modules #", "# # Licensed under the Apache License, Version 2.0 (the", "% config_volume) hash_data = None if os.path.isfile(hashfile): with open(hashfile) as", "\"file,file_line,concat,augeas,%s\" % puppet_tags else: puppet_tags = \"file,file_line,concat,augeas\" process_map.append([config_volume, puppet_tags, manifest,", "log.error('ERROR configuring %s' % config_volume) success = False # Update", "governing permissions and limitations # under the License. 
# Shell", "tool to run puppet inside of the given docker container", "zip(returncodes, config_volumes): if returncode != 0: log.error('ERROR configuring %s' %", "with the config hash we generated above config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX',", "(k, config_volume, config_hash)) infile_data[k]['environment'] = env outfile = os.path.join(os.path.dirname(infile), \"hashed-\"", "config_hash)) infile_data[k]['environment'] = env outfile = os.path.join(os.path.dirname(infile), \"hashed-\" + os.path.basename(infile))", "'--volume', '/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image) log.debug('Running docker command: %s' % ' '.join(dcmd))", "such container: {}\\n'.format(name): log.debug(cmd_stderr) process_count = int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count())) log.info('Running docker-puppet')", "continue log.info('config_volume %s' % config_volume) log.info('puppet_tags %s' % puppet_tags) log.info('manifest", "v in infile_data.iteritems(): config_volume = match_config_volume(config_volume_prefix, v) if config_volume: config_hash", "= subprocess.Popen(['/usr/bin/docker', 'diff', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate()", "# NOTE(flaper87): Always copy the DOCKER_* environment variables as #", "k.startswith('DOCKER'), os.environ.keys()): env[k] = os.environ.get(k) if os.environ.get('NET_HOST', 'false') == 'true':", "Update the startup configs with the config hash we generated", "% ' '.join(dcmd)) subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) cmd_stdout,", "even though\" \" shared volumes are the same!\") else: log.info(\"Adding", "for p in process_map: log.debug('- %s' % p) # Fire", "- we can't just use the # key as e.g", "for %s, config_volume=%s hash=%s\" % (k, config_volume, config_hash)) infile_data[k]['environment'] =", "$rsync_srcs /var/lib/config-data/${NAME} # Also make a copy of files modified", 
"configs[config_volume] puppet_tags = service[1] or '' manifest = service[2] or", "open(infile) as f: infile_data = json.load(f) for k, v in", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "the services should be configured. This should match # in", "in all instances where the volume name is also the", "name) subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr", "service[3] or '' volumes = service[4] if len(service) > 4", "f.read().rstrip() return hash_data def rm_container(name): if os.environ.get('SHOW_DIFF', None): log.info('Diffing container:", "if isinstance(service, dict): service = [ service.get('config_volume'), service.get('puppet_tags'), service.get('step_config'), service.get('config_image'),", "config_volume) if config_hash: env = v.get('environment', []) env.append(\"TRIPLEO_CONFIG_HASH=%s\" % config_hash)", "configs[config_volume][3] != config_image: log.warn(\"Config containers do not match even though\"", "-d \"$d\" ]; then rsync_srcs+=\" $d\" fi done rsync -a", "all configs. if config_volume in configs: # Append puppet tags", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "= subprocess.Popen(['/usr/bin/docker', 'pull', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate()", "tags and manifest\") if puppet_tags: configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],", "under the License. # Shell script tool to run puppet", "configs: # Append puppet tags and manifest. log.info(\"Existing service, appending", "runs, for debugging rm_container('docker-puppet-%s' % config_volume) return subproc.returncode # Holds", "= service[1] or '' manifest = service[2] or '' config_image", "To support this we merge shared settings together here. 
#", "process_map)) config_volumes = [pm[0] for pm in process_map] success =", "open(config_file) as f: json_data = json.load(f) # To save time", "%s' % volumes) # We key off of config volume", "'--name', 'docker-puppet-%s' % config_volume, '--env', 'PUPPET_TAGS=%s' % puppet_tags, '--env', 'NAME=%s'", "hash_data def rm_container(name): if os.environ.get('SHOW_DIFF', None): log.info('Diffing container: %s' %", "= '/var/lib/docker-puppet/docker-puppet.sh' with open(sh_script, 'w') as script_file: os.chmod(script_file.name, 0755) script_file.write(\"\"\"#!/bin/bash", "# that can be used to generate config files or", "We key off of config volume for all configs. if", "configuring all of the heat services # in a single", "of files modified during puppet run # This is useful", "perform each configuration. Defaults # to the number of CPUs", "service in (json_data or []): if service is None: continue", "Also make a copy of files modified during puppet run", "we do in deployed-server def short_hostname(): subproc = subprocess.Popen(['hostname', '-s'],", "%s' % config_image) log.debug('volumes %s' % volumes) sh_script = '/var/lib/docker-puppet/docker-puppet.sh'", "the config changes tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' |", "configured. This should match # in all instances where the", "% startup_configs) infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for infile in infiles: with", "as # they contain the access data for the docker", "pass makes sense and will save some time. 
# To", "hash_data = None if os.path.isfile(hashfile): with open(hashfile) as f: hash_data", "[ -z \"$NO_ARCHIVE\" ]; then archivedirs=(\"/etc\" \"/root\" \"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\" \"/var/www\")", "for d in \"${archivedirs[@]}\"; do if [ -d \"$d\" ];", "(configs[config_volume][2], manifest) if configs[config_volume][3] != config_image: log.warn(\"Config containers do not", "config hash for %s, config_volume=%s hash=%s\" % (k, config_volume, config_hash))", "config files or run ad-hoc puppet modules # inside of", "infile in infiles: with open(infile) as f: infile_data = json.load(f)", "-R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME} # Also make a copy", "config_volume=%s hash=%s\" % (k, config_volume, config_hash)) infile_data[k]['environment'] = env outfile", "-newer /etc/ssh/ssh_known_hosts -print0) \\ / /var/lib/config-data/puppet-generated/${NAME} # Write a checksum", "subprocess.Popen(['/usr/bin/docker', 'rm', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if", "hash=%s\" % (k, config_volume, config_hash)) infile_data[k]['environment'] = env outfile =", "'HOSTNAME=%s' % short_hostname(), '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''), '--env', 'STEP=%s'", "config_volume in zip(returncodes, config_volumes): if returncode != 0: log.error('ERROR configuring", "script_file.write(\"\"\"#!/bin/bash set -ex mkdir -p /etc/puppet cp -a /tmp/puppet-etc/* /etc/puppet", "in infile_data.iteritems(): config_volume = match_config_volume(config_volume_prefix, v) if config_volume: config_hash =", "and manifest\") if puppet_tags: configs[config_volume][1] = '%s,%s' % (configs[config_volume][1], puppet_tags)", "logging import os import sys import subprocess import sys import", "$d\" fi done rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}", "returncode != 0: log.error('ERROR configuring %s' % config_volume) success =", 
"cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) # only delete successful runs,", "Append puppet tags and manifest. log.info(\"Existing service, appending puppet tags", "script injection '--volume', '%s:%s:rw' % (sh_script, sh_script) ] for volume", "and manifest. log.info(\"Existing service, appending puppet tags and manifest\") if", "specifying the container # in which the services should be", "of the heat services # in a single container pass", "puppet inside of the given docker container image. # Uses", "# in all instances where the volume name is also", "OF ANY KIND, either express or implied. See the #", "service.get('config_volume'), service.get('puppet_tags'), service.get('step_config'), service.get('config_image'), service.get('volumes', []), ] config_volume = service[0]", "Write a checksum of the config-data dir, this is used", "if configs[config_volume][3] != config_image: log.warn(\"Config containers do not match even", "of starting them all linearly we run them using a", "in writing, software # distributed under the License is distributed", "in \"${archivedirs[@]}\"; do if [ -d \"$d\" ]; then rsync_srcs+=\"", "copy of files modified during puppet run # This is", "a process # pool. This creates a list of arguments", "= logging.Formatter('%(asctime)s %(levelname)s: %(message)s') ch.setFormatter(formatter) log.addHandler(ch) # this is to", "container image. # Uses the config file at /var/lib/docker-puppet/docker-puppet.json as", "log.info('config_volume %s' % config_volume) log.info('puppet_tags %s' % puppet_tags) log.info('manifest %s'", "of the given docker container image. 
# Uses the config", "import sys import tempfile import multiprocessing log = logging.getLogger() ch", "return hash_data def rm_container(name): if os.environ.get('SHOW_DIFF', None): log.info('Diffing container: %s'", "/var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum", "that can be used to generate config files or run", "number of CPUs on the system. p = multiprocessing.Pool(process_count) returncodes", "subproc.returncode != 0: log.error('Failed running docker-puppet.py for %s' % config_volume)", "manifest) log.debug('config_image %s' % config_image) log.debug('volumes %s' % volumes) sh_script", "-f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}'", "if manifest: configs[config_volume][2] = '%s\\n%s' % (configs[config_volume][2], manifest) if configs[config_volume][3]", "'/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs) infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for infile", "startup_configs) infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for infile in infiles: with open(infile)", "with the License. You may obtain # a copy of", "JSON # array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings", "--mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum fi", "source for a JSON # array of [config_volume, puppet_tags, manifest,", "data for the docker daemon. for k in filter(lambda k:", "for the docker daemon. 
for k in filter(lambda k: k.startswith('DOCKER'),", "= list(p.map(mp_puppet_config, process_map)) config_volumes = [pm[0] for pm in process_map]", "process_map: log.debug('- %s' % p) # Fire off processes to", "= subprocess.Popen(['hostname', '-s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() return", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "\"$PUPPET_TAGS\" ]; then TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\" fi # workaround LP1696283 mkdir", "subproc.communicate() return cmd_stdout.rstrip() def pull_image(name): log.info('Pulling image: %s' % name)", "cmd_stderr: log.error(cmd_stderr) else: if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) #", "consume. # Instead of starting them all linearly we run", "hash_data = f.read().rstrip() return hash_data def rm_container(name): if os.environ.get('SHOW_DIFF', None):", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name], stdout=subprocess.PIPE,", "sh_script) ] for volume in volumes: if volume: dcmd.extend(['--volume', volume])", "[]): if service is None: continue if isinstance(service, dict): service", "short_hostname(): subproc = subprocess.Popen(['hostname', '-s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr =", "except in compliance with the License. 
You may obtain #", "service = configs[config_volume] puppet_tags = service[1] or '' manifest =", "subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr =", "tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk", "'{print $1}' > /var/lib/config-data/${NAME}.md5sum fi \"\"\") with tempfile.NamedTemporaryFile() as tmp_man:", "injection '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', #", "log.error(cmd_stdout) if cmd_stderr: log.error(cmd_stderr) else: if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr:", "%s' % manifest) log.debug('config_image %s' % config_image) log.debug('volumes %s' %", "# distributed under the License is distributed on an \"AS", "each process to consume. # Instead of starting them all", "# Unless required by applicable law or agreed to in", "or '' manifest = service[2] or '' config_image = service[3]", "\\ 'No such container: {}\\n'.format(name): log.debug(cmd_stderr) process_count = int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count()))", "debugging rm_container('docker-puppet-%s' % config_volume) return subproc.returncode # Holds all the", "> /etc/puppet/hieradata/docker.json TAGS=\"\" if [ -n \"$PUPPET_TAGS\" ]; then TAGS=\"--tags", "is used as a # salt to trigger container restart", "not match even though\" \" shared volumes are the same!\")", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "['/usr/bin/docker', 'run', '--user', 'root', '--name', 'docker-puppet-%s' % config_volume, '--env', 'PUPPET_TAGS=%s'", "= {} for service in (json_data or []): if service", "the same for a # given group of services. 
We", "as f: json_data = json.load(f) # To save time we", "service[1] or '' manifest = service[2] or '' config_image =", "rsync -a -R -0 --delay-updates --delete-after \\ --files-from=<(find $rsync_srcs -newer", "stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() return cmd_stdout.rstrip() def pull_image(name):", "open(hashfile) as f: hash_data = f.read().rstrip() return hash_data def rm_container(name):", "script_file: os.chmod(script_file.name, 0755) script_file.write(\"\"\"#!/bin/bash set -ex mkdir -p /etc/puppet cp", "'/var/lib/config-data') log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix) startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN:", "with open(hashfile) as f: hash_data = f.read().rstrip() return hash_data def", "% config_volume) log.debug('puppet_tags %s' % puppet_tags) log.debug('manifest %s' % manifest)", "volume]) dcmd.extend(['--entrypoint', sh_script]) env = {} # NOTE(flaper87): Always copy", "using a process # pool. This creates a list of", "match # in all instances where the volume name is", "with open(tmp_man.name, 'w') as man_file: man_file.write('include ::tripleo::packages\\n') man_file.write(manifest) rm_container('docker-puppet-%s' %", "name) subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr", "contain the access data for the docker daemon. for k", "or []): if service is None: continue if isinstance(service, dict):", "run them using a process # pool. 
This creates a", "in deployed-server def short_hostname(): subproc = subprocess.Popen(['hostname', '-s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "= subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) def match_config_volume(prefix,", "name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout)", "specific language governing permissions and limitations # under the License.", "a copy of files modified during puppet run # This", "log.error('Failed running docker-puppet.py for %s' % config_volume) if cmd_stdout: log.error(cmd_stdout)", "# not use this file except in compliance with the", "/ /var/lib/config-data/puppet-generated/${NAME} # Write a checksum of the config-data dir,", "%s' % manifest) log.info('config_image %s' % config_image) log.info('volumes %s' %", "we support configuring 'shared' services at the same # time.", "same for a # given group of services. 
We are", "\"%s.md5sum\" % config_volume) hash_data = None if os.path.isfile(hashfile): with open(hashfile)", "log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) log.info('Removing container: %s' % name) subproc", "v.get('environment', []) env.append(\"TRIPLEO_CONFIG_HASH=%s\" % config_hash) log.debug(\"Updating config hash for %s,", "a checksum of the config-data dir, this is used as", "import json import logging import os import sys import subprocess", "config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG: %s' % config_file) with open(config_file)", "/etc/ssh/ssh_known_hosts -print0) \\ / /var/lib/config-data/puppet-generated/${NAME} # Write a checksum of", "logging.Formatter('%(asctime)s %(levelname)s: %(message)s') ch.setFormatter(formatter) log.addHandler(ch) # this is to match", "rsync_srcs=\"\" for d in \"${archivedirs[@]}\"; do if [ -d \"$d\"", "$STEP}\" > /etc/puppet/hieradata/docker.json TAGS=\"\" if [ -n \"$PUPPET_TAGS\" ]; then", "#!/usr/bin/env python # # Licensed under the Apache License, Version", "under the License is distributed on an \"AS IS\" BASIS,", "also the same. configs = {} for service in (json_data", "of config volume for all configs. if config_volume in configs:", "be used to generate config files or run ad-hoc puppet", "modules # inside of a container. import glob import json", "config-data/nova volumes = config.get('volumes', []) config_volume=None for v in volumes:", "this file except in compliance with the License. You may", "save some time. 
# To support this we merge shared", "# script injection '--volume', '%s:%s:rw' % (sh_script, sh_script) ] for", "-ex mkdir -p /etc/puppet cp -a /tmp/puppet-etc/* /etc/puppet rm -Rf", "%s' % ' '.join(dcmd)) subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)", "% config_volume, '--env', 'PUPPET_TAGS=%s' % puppet_tags, '--env', 'NAME=%s' % config_volume,", "[] if not manifest or not config_image: continue log.info('config_volume %s'", "json.load(f) for k, v in infile_data.iteritems(): config_volume = match_config_volume(config_volume_prefix, v)", "cp -a /tmp/puppet-etc/* /etc/puppet rm -Rf /etc/puppet/ssl # not in", "trusted CA injection '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume',", "\"$NO_ARCHIVE\" ]; then archivedirs=(\"/etc\" \"/root\" \"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\" \"/var/www\") rsync_srcs=\"\" for", "file except in compliance with the License. You may obtain", "'STEP=%s' % os.environ.get('STEP', '6'), '--volume', '%s:/etc/config.pp:ro' % tmp_man.name, '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',", "OpenSSL trusted CA injection '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',", "config_volume, config_hash)) infile_data[k]['environment'] = env outfile = os.path.join(os.path.dirname(infile), \"hashed-\" +", "key off of config volume for all configs. if config_volume", "given docker container image. # Uses the config file at", "OR CONDITIONS OF ANY KIND, either express or implied. 
See", "manifest, config_image, volumes]) for p in process_map: log.debug('- %s' %", "multiprocessing log = logging.getLogger() ch = logging.StreamHandler(sys.stdout) if os.environ.get('DEBUG', False):", "run # This is useful for debugging mkdir -p /var/lib/config-data/puppet-generated/${NAME}", "ad-hoc puppet modules # inside of a container. import glob", "configuring %s' % config_volume) success = False # Update the", "'/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', # script injection", "% config_hash) log.debug(\"Updating config hash for %s, config_volume=%s hash=%s\" %", "\"\"\") with tempfile.NamedTemporaryFile() as tmp_man: with open(tmp_man.name, 'w') as man_file:", "= subproc.communicate() if subproc.returncode != 0: log.error('Failed running docker-puppet.py for", "log.error(cmd_stderr) else: if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) # only", "# workaround LP1696283 mkdir -p /etc/ssh touch /etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME FACTER_uuid=docker", "p in process_map: log.debug('- %s' % p) # Fire off", "log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) # only delete successful runs, for", "'--env', 'HOSTNAME=%s' % short_hostname(), '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''), '--env',", "config.get('volumes', []) config_volume=None for v in volumes: if v.startswith(prefix): config_volume", "% tmp_man.name, '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/',", "deployed-server def short_hostname(): subproc = subprocess.Popen(['hostname', '-s'], 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout,", "% manifest) log.debug('config_image %s' % config_image) log.debug('volumes %s' % volumes)", "also now specifying the container # in which the services", "we run them using a process # pool. This creates", "License. # Shell script tool to run puppet inside of", "and limitations # under the License. # Shell script tool", "dir, this is used as a # salt to trigger", "config hash we generated above config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') log.debug('CONFIG_VOLUME_PREFIX:", "the container # in which the services should be configured.", "manifest) log.info('config_image %s' % config_image) log.info('volumes %s' % volumes) #", "import multiprocessing log = logging.getLogger() ch = logging.StreamHandler(sys.stdout) if os.environ.get('DEBUG',", "the same # time. For example configuring all of the", "all of the heat services # in a single container", "!= config_image: log.warn(\"Config containers do not match even though\" \"", "::tripleo::packages\\n') man_file.write(manifest) rm_container('docker-puppet-%s' % config_volume) pull_image(config_image) dcmd = ['/usr/bin/docker', 'run',", "as f: hash_data = f.read().rstrip() return hash_data def rm_container(name): if", "config_image, volumes)): log.debug('config_volume %s' % config_volume) log.debug('puppet_tags %s' % puppet_tags)", "'/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image) log.debug('Running docker command: %s' % ' '.join(dcmd)) subproc", "configs = {} for service in (json_data or []): if", "with open(config_file) as f: json_data = json.load(f) # To save", "if config_volume in configs: # Append puppet tags and manifest.", "creates a list of arguments for the above function #", "writing, software # distributed under the License is distributed on", "manifest: configs[config_volume][2] = '%s\\n%s' % (configs[config_volume][2], manifest) if configs[config_volume][3] !=", "config_volume, '--env', 
'PUPPET_TAGS=%s' % puppet_tags, '--env', 'NAME=%s' % config_volume, '--env',", "tempfile import multiprocessing log = logging.getLogger() ch = logging.StreamHandler(sys.stdout) if", "what we do in deployed-server def short_hostname(): subproc = subprocess.Popen(['hostname',", "cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr)", "the License. You may obtain # a copy of the", "--delay-updates --delete-after \\ --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \\ /", "use this file except in compliance with the License. You", "{} # NOTE(flaper87): Always copy the DOCKER_* environment variables as", "log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs) infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for infile in", "'%s\\n%s' % (configs[config_volume][2], manifest) if configs[config_volume][3] != config_image: log.warn(\"Config containers", "stderr=subprocess.PIPE, env=env) cmd_stdout, cmd_stderr = subproc.communicate() if subproc.returncode != 0:", "match what we do in deployed-server def short_hostname(): subproc =", "support configuring 'shared' services at the same # time. 
For", "heat services # in a single container pass makes sense", "--delete-after \\ --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \\ / /var/lib/config-data/puppet-generated/${NAME}", "\"/var/lib/ironic/httpboot\" \"/var/www\") rsync_srcs=\"\" for d in \"${archivedirs[@]}\"; do if [", "log.debug('config_image %s' % config_image) log.debug('volumes %s' % volumes) sh_script =", "/etc/config.pp # Disables archiving if [ -z \"$NO_ARCHIVE\" ]; then", "'--env', 'NAME=%s' % config_volume, '--env', 'HOSTNAME=%s' % short_hostname(), '--env', 'NO_ARCHIVE=%s'", "-n \"$PUPPET_TAGS\" ]; then TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\" fi # workaround LP1696283", "''), '--env', 'STEP=%s' % os.environ.get('STEP', '6'), '--volume', '%s:/etc/config.pp:ro' % tmp_man.name,", "'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', # script injection '--volume', '%s:%s:rw' % (sh_script, sh_script) ]", "service[4] if len(service) > 4 else [] if puppet_tags: puppet_tags", "env.append(\"TRIPLEO_CONFIG_HASH=%s\" % config_hash) log.debug(\"Updating config hash for %s, config_volume=%s hash=%s\"", "= \"file,file_line,concat,augeas,%s\" % puppet_tags else: puppet_tags = \"file,file_line,concat,augeas\" process_map.append([config_volume, puppet_tags,", "'--volume', '%s:%s:rw' % (sh_script, sh_script) ] for volume in volumes:", "config_volume as this should be the same for a #", "\"/var/www\") rsync_srcs=\"\" for d in \"${archivedirs[@]}\"; do if [ -d", "cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) def match_config_volume(prefix, config): # Match", "express or implied. 
See the # License for the specific", "in infiles: with open(infile) as f: infile_data = json.load(f) for", "config_hash = get_config_hash(config_volume_prefix, config_volume) if config_hash: env = v.get('environment', [])", "the Apache License, Version 2.0 (the \"License\"); you may #", "% puppet_tags) log.info('manifest %s' % manifest) log.info('config_image %s' % config_image)", "' \\ 'No such container: {}\\n'.format(name): log.debug(cmd_stderr) process_count = int(os.environ.get('PROCESS_COUNT',", "'--volume', 'tripleo_logs:/var/log/tripleo/', # OpenSSL trusted CA injection '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume',", "to consume. process_map = [] for config_volume in configs: service", "fi done rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME} #", "in configs: service = configs[config_volume] puppet_tags = service[1] or ''", "stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if", "> /var/lib/config-data/${NAME}.md5sum fi \"\"\") with tempfile.NamedTemporaryFile() as tmp_man: with open(tmp_man.name,", "config_volume_prefix) startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs) infiles", "if subproc.returncode != 0: log.error('Failed running docker-puppet.py for %s' %", "% config_volume, '--env', 'HOSTNAME=%s' % short_hostname(), '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE',", "not manifest or not config_image: continue log.info('config_volume %s' % config_volume)", "'--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', # script injection '--volume', '%s:%s:rw' %", "be the same for a # given group of services.", "same # time. 
For example configuring all of the heat", "\" shared volumes are the same!\") else: log.info(\"Adding new service\")", "time. # To support this we merge shared settings together", "= os.environ.get(k) if os.environ.get('NET_HOST', 'false') == 'true': log.debug('NET_HOST enabled') dcmd.extend(['--net',", "'-s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() return cmd_stdout.rstrip() def", "config volume - we can't just use the # key", "given group of services. We are also now specifying the", "'NAME=%s' % config_volume, '--env', 'HOSTNAME=%s' % short_hostname(), '--env', 'NO_ARCHIVE=%s' %", "os.chmod(script_file.name, 0755) script_file.write(\"\"\"#!/bin/bash set -ex mkdir -p /etc/puppet cp -a", "\"file,file_line,concat,augeas\" process_map.append([config_volume, puppet_tags, manifest, config_image, volumes]) for p in process_map:", "config): # Match the mounted config volume - we can't", "% name) subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout,", "config_image: continue log.info('config_volume %s' % config_volume) log.info('puppet_tags %s' % puppet_tags)", "'docker-puppet-%s' % config_volume, '--env', 'PUPPET_TAGS=%s' % puppet_tags, '--env', 'NAME=%s' %", "cmd_stderr: log.debug(cmd_stderr) # only delete successful runs, for debugging rm_container('docker-puppet-%s'", "open(sh_script, 'w') as script_file: os.chmod(script_file.name, 0755) script_file.write(\"\"\"#!/bin/bash set -ex mkdir", "'--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', # script injection '--volume',", "dcmd.extend(['--entrypoint', sh_script]) env = {} # NOTE(flaper87): Always copy the", "a container. 
import glob import json import logging import os", "if volume: dcmd.extend(['--volume', volume]) dcmd.extend(['--entrypoint', sh_script]) env = {} #", "else: log.info(\"Adding new service\") configs[config_volume] = service log.info('Service compilation completed.')", "if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) def match_config_volume(prefix, config): #", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "if service is None: continue if isinstance(service, dict): service =", "volumes]) for p in process_map: log.debug('- %s' % p) #", "volume: dcmd.extend(['--volume', volume]) dcmd.extend(['--entrypoint', sh_script]) env = {} # NOTE(flaper87):", "infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for infile in infiles: with open(infile) as", "log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix) startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN: %s'", "# Shell script tool to run puppet inside of the", "configs[config_volume][2] = '%s\\n%s' % (configs[config_volume][2], manifest) if configs[config_volume][3] != config_image:", "= json.load(f) for k, v in infile_data.iteritems(): config_volume = match_config_volume(config_volume_prefix,", "fi \"\"\") with tempfile.NamedTemporaryFile() as tmp_man: with open(tmp_man.name, 'w') as", "= logging.StreamHandler(sys.stdout) if os.environ.get('DEBUG', False): log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) else: log.setLevel(logging.INFO) ch.setLevel(logging.INFO)", "if len(service) > 4 else [] if puppet_tags: puppet_tags =", "services # in a single container pass makes sense and", "NOTE(flaper87): Always copy the DOCKER_* environment variables as # they", "% p) # Fire off processes to perform each configuration.", "= None if os.path.isfile(hashfile): with open(hashfile) as f: hash_data =", "%s, 
config_volume=%s hash=%s\" % (k, config_volume, config_hash)) infile_data[k]['environment'] = env", "for each process to consume. # Instead of starting them", "License for the specific language governing permissions and limitations #", "config_volume) if cmd_stdout: log.error(cmd_stdout) if cmd_stderr: log.error(cmd_stderr) else: if cmd_stdout:", "config_volume = os.path.relpath( v.split(\":\")[0], prefix).split(\"/\")[0] break return config_volume def get_config_hash(prefix,", "'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/', # OpenSSL trusted CA", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "daemon: ' \\ 'No such container: {}\\n'.format(name): log.debug(cmd_stderr) process_count =", "multiprocessing.cpu_count())) log.info('Running docker-puppet') config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG: %s' %", "same!\") else: log.info(\"Adding new service\") configs[config_volume] = service log.info('Service compilation", "/var/lib/config-data/${NAME} # Also make a copy of files modified during", "are the same!\") else: log.info(\"Adding new service\") configs[config_volume] = service", "archiving if [ -z \"$NO_ARCHIVE\" ]; then archivedirs=(\"/etc\" \"/root\" \"/var/lib/ironic/tftpboot\"", "'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''), '--env', 'STEP=%s' % os.environ.get('STEP', '6'), '--volume',", "puppet_tags = \"file,file_line,concat,augeas,%s\" % puppet_tags else: puppet_tags = \"file,file_line,concat,augeas\" process_map.append([config_volume,", "infile_data = json.load(f) for k, v in infile_data.iteritems(): config_volume =", "pull_image(config_image) dcmd = ['/usr/bin/docker', 'run', '--user', 'root', '--name', 'docker-puppet-%s' %", "ch.setLevel(logging.DEBUG) else: log.setLevel(logging.INFO) 
ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s') ch.setFormatter(formatter)", "# # We key off of config_volume as this should", "a JSON # array of [config_volume, puppet_tags, manifest, config_image, [volumes]]", "man_file: man_file.write('include ::tripleo::packages\\n') man_file.write(manifest) rm_container('docker-puppet-%s' % config_volume) pull_image(config_image) dcmd =", "-R -0 --delay-updates --delete-after \\ --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0)", "/tmp/puppet-etc/* /etc/puppet rm -Rf /etc/puppet/ssl # not in use and", "volumes: if v.startswith(prefix): config_volume = os.path.relpath( v.split(\":\")[0], prefix).split(\"/\")[0] break return", "the # key as e.g \"novacomute\" consumes config-data/nova volumes =", "cmd_stderr and \\ cmd_stderr != 'Error response from daemon: '", "= get_config_hash(config_volume_prefix, config_volume) if config_hash: env = v.get('environment', []) env.append(\"TRIPLEO_CONFIG_HASH=%s\"", "# they contain the access data for the docker daemon.", "= subprocess.Popen(dcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) cmd_stdout, cmd_stderr = subproc.communicate() if", "the # License for the specific language governing permissions and", "'PUPPET_TAGS=%s' % puppet_tags, '--env', 'NAME=%s' % config_volume, '--env', 'HOSTNAME=%s' %", "match even though\" \" shared volumes are the same!\") else:", "subproc.returncode # Holds all the information for each process to", "infile_data[k]['environment'] = env outfile = os.path.join(os.path.dirname(infile), \"hashed-\" + os.path.basename(infile)) with", "config_volume: config_hash = get_config_hash(config_volume_prefix, config_volume) if config_hash: env = v.get('environment',", "here. 
# # We key off of config_volume as this", "shared volumes are the same!\") else: log.info(\"Adding new service\") configs[config_volume]", "[pm[0] for pm in process_map] success = True for returncode,", "puppet tags and manifest\") if puppet_tags: configs[config_volume][1] = '%s,%s' %", "log.debug(cmd_stderr) def match_config_volume(prefix, config): # Match the mounted config volume", "service, appending puppet tags and manifest\") if puppet_tags: configs[config_volume][1] =", "single container pass makes sense and will save some time.", "as a # salt to trigger container restart when the", "\"$d\" ]; then rsync_srcs+=\" $d\" fi done rsync -a -R", "return subproc.returncode # Holds all the information for each process", "config_image: log.warn(\"Config containers do not match even though\" \" shared", "infiles: with open(infile) as f: infile_data = json.load(f) for k,", "returncode, config_volume in zip(returncodes, config_volumes): if returncode != 0: log.error('ERROR", "= {} # NOTE(flaper87): Always copy the DOCKER_* environment variables", "support this we merge shared settings together here. 
# #", "or '' puppet_tags = service[1] or '' manifest = service[2]", "/etc/ssh touch /etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp", "= service[2] or '' config_image = service[3] or '' volumes", "docker-puppet') config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG: %s' % config_file) with", "containers do not match even though\" \" shared volumes are", "the heat services # in a single container pass makes", "python # # Licensed under the Apache License, Version 2.0", "None if os.path.isfile(hashfile): with open(hashfile) as f: hash_data = f.read().rstrip()", "if config_hash: env = v.get('environment', []) env.append(\"TRIPLEO_CONFIG_HASH=%s\" % config_hash) log.debug(\"Updating", "service.get('step_config'), service.get('config_image'), service.get('volumes', []), ] config_volume = service[0] or ''", "and \\ cmd_stderr != 'Error response from daemon: ' \\", "# under the License. 
# Shell script tool to run", "if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr and \\ cmd_stderr != 'Error", "dcmd = ['/usr/bin/docker', 'run', '--user', 'root', '--name', 'docker-puppet-%s' % config_volume,", "% config_volume_prefix) startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs)", "stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) cmd_stdout, cmd_stderr = subproc.communicate() if subproc.returncode !=", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "--delete-after $rsync_srcs /var/lib/config-data/${NAME} # Also make a copy of files", "appending puppet tags and manifest\") if puppet_tags: configs[config_volume][1] = '%s,%s'", "%(levelname)s: %(message)s') ch.setFormatter(formatter) log.addHandler(ch) # this is to match what", "then TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\" fi # workaround LP1696283 mkdir -p /etc/ssh", "you may # not use this file except in compliance", "log.info(\"Existing service, appending puppet tags and manifest\") if puppet_tags: configs[config_volume][1]", "image. 
# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a", "with open(sh_script, 'w') as script_file: os.chmod(script_file.name, 0755) script_file.write(\"\"\"#!/bin/bash set -ex", "do if [ -d \"$d\" ]; then rsync_srcs+=\" $d\" fi", "above config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix) startup_configs", "'--env', 'STEP=%s' % os.environ.get('STEP', '6'), '--volume', '%s:/etc/config.pp:ro' % tmp_man.name, '--volume',", "import glob import json import logging import os import sys", "'w') as man_file: man_file.write('include ::tripleo::packages\\n') man_file.write(manifest) rm_container('docker-puppet-%s' % config_volume) pull_image(config_image)", "language governing permissions and limitations # under the License. #", "in process_map] success = True for returncode, config_volume in zip(returncodes,", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "used as a # salt to trigger container restart when", "'6'), '--volume', '%s:/etc/config.pp:ro' % tmp_man.name, '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume',", "generated above config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix)", "process # pool. This creates a list of arguments for", "cmd_stderr = subproc.communicate() if subproc.returncode != 0: log.error('Failed running docker-puppet.py", "of a container. 
import glob import json import logging import", "d in \"${archivedirs[@]}\"; do if [ -d \"$d\" ]; then", "if config_volume: config_hash = get_config_hash(config_volume_prefix, config_volume) if config_hash: env =", "compilation completed.') def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)): log.debug('config_volume %s'", "# key as e.g \"novacomute\" consumes config-data/nova volumes = config.get('volumes',", "%s' % config_image) log.info('volumes %s' % volumes) # We key", "puppet_tags = service[1] or '' manifest = service[2] or ''", "for the specific language governing permissions and limitations # under", "done rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME} # Also", "log.setLevel(logging.INFO) ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s') ch.setFormatter(formatter) log.addHandler(ch) #", "FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp # Disables archiving", "os import sys import subprocess import sys import tempfile import", "'%s:%s:rw' % (sh_script, sh_script) ] for volume in volumes: if", "container restart when the config changes tar -c -f -", "inside of the given docker container image. # Uses the", "/etc/puppet/hieradata/docker.json TAGS=\"\" if [ -n \"$PUPPET_TAGS\" ]; then TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\"", "all the information for each process to consume. 
# Instead", "script tool to run puppet inside of the given docker", "= subprocess.Popen(['/usr/bin/docker', 'rm', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate()", "sys import tempfile import multiprocessing log = logging.getLogger() ch =", "# Write a checksum of the config-data dir, this is", "v) if config_volume: config_hash = get_config_hash(config_volume_prefix, config_volume) if config_hash: env", "config_volume def get_config_hash(prefix, config_volume): hashfile = os.path.join(prefix, \"%s.md5sum\" % config_volume)", "f: json_data = json.load(f) # To save time we support", "- /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' >", "!= 0: log.error('ERROR configuring %s' % config_volume) success = False", "CA injection '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',", "]; then rsync_srcs+=\" $d\" fi done rsync -a -R --delay-updates", "!= 'Error response from daemon: ' \\ 'No such container:", "volume for all configs. 
if config_volume in configs: # Append", "config_volume=None for v in volumes: if v.startswith(prefix): config_volume = os.path.relpath(", "config_volume) return subproc.returncode # Holds all the information for each", "config-data dir, this is used as a # salt to", "config_volumes = [pm[0] for pm in process_map] success = True", "we can't just use the # key as e.g \"novacomute\"", "$rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \\ / /var/lib/config-data/puppet-generated/${NAME} # Write a", "tempfile.NamedTemporaryFile() as tmp_man: with open(tmp_man.name, 'w') as man_file: man_file.write('include ::tripleo::packages\\n')", "/etc/puppet rm -Rf /etc/puppet/ssl # not in use and causes", "infile_data.iteritems(): config_volume = match_config_volume(config_volume_prefix, v) if config_volume: config_hash = get_config_hash(config_volume_prefix,", "service[2] or '' config_image = service[3] or '' volumes =", "v.split(\":\")[0], prefix).split(\"/\")[0] break return config_volume def get_config_hash(prefix, config_volume): hashfile =", "puppet_tags) log.debug('manifest %s' % manifest) log.debug('config_image %s' % config_image) log.debug('volumes", "ch.setFormatter(formatter) log.addHandler(ch) # this is to match what we do", "as this should be the same for a # given", "rsync_srcs+=\" $d\" fi done rsync -a -R --delay-updates --delete-after $rsync_srcs", "from daemon: ' \\ 'No such container: {}\\n'.format(name): log.debug(cmd_stderr) process_count", "os.environ.get('NET_HOST', 'false') == 'true': log.debug('NET_HOST enabled') dcmd.extend(['--net', 'host', '--volume', '/etc/hosts:/etc/hosts:ro'])", "hash for %s, config_volume=%s hash=%s\" % (k, config_volume, config_hash)) infile_data[k]['environment']", "puppet_tags = \"file,file_line,concat,augeas\" process_map.append([config_volume, puppet_tags, manifest, config_image, volumes]) for p", "You may obtain # a copy of the License at", "= os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') 
log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix) startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN',", "os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs) infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for", "consumes config-data/nova volumes = config.get('volumes', []) config_volume=None for v in", "-p /etc/puppet cp -a /tmp/puppet-etc/* /etc/puppet rm -Rf /etc/puppet/ssl #", "manifest, config_image, volumes)): log.debug('config_volume %s' % config_volume) log.debug('puppet_tags %s' %", "in use and causes permission errors echo \"{\\\\\"step\\\\\": $STEP}\" >", "subproc.communicate() if subproc.returncode != 0: log.error('Failed running docker-puppet.py for %s'", "to consume. # Instead of starting them all linearly we", "volumes = config.get('volumes', []) config_volume=None for v in volumes: if", "# given group of services. We are also now specifying", "'diff', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout:", "use the # key as e.g \"novacomute\" consumes config-data/nova volumes", "= v.get('environment', []) env.append(\"TRIPLEO_CONFIG_HASH=%s\" % config_hash) log.debug(\"Updating config hash for", "'tripleo_logs:/var/log/tripleo/', # OpenSSL trusted CA injection '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',", "service.get('puppet_tags'), service.get('step_config'), service.get('config_image'), service.get('volumes', []), ] config_volume = service[0] or", "This is useful for debugging mkdir -p /var/lib/config-data/puppet-generated/${NAME} rsync -a", "the docker daemon. for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):", "the License. 
# Shell script tool to run puppet inside", "merge shared settings together here. # # We key off", "/var/lib/config-data/${NAME}.md5sum fi \"\"\") with tempfile.NamedTemporaryFile() as tmp_man: with open(tmp_man.name, 'w')", "will save some time. # To support this we merge", "manifest or not config_image: continue log.info('config_volume %s' % config_volume) log.info('puppet_tags", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "'false') == 'true': log.debug('NET_HOST enabled') dcmd.extend(['--net', 'host', '--volume', '/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image)", "import subprocess import sys import tempfile import multiprocessing log =", "False): log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) else: log.setLevel(logging.INFO) ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(levelname)s:", "function # to consume. process_map = [] for config_volume in", "config_volume) hash_data = None if os.path.isfile(hashfile): with open(hashfile) as f:", "return config_volume def get_config_hash(prefix, config_volume): hashfile = os.path.join(prefix, \"%s.md5sum\" %", "enabled') dcmd.extend(['--net', 'host', '--volume', '/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image) log.debug('Running docker command: %s'", "# to consume. process_map = [] for config_volume in configs:", "# salt to trigger container restart when the config changes", "k, v in infile_data.iteritems(): config_volume = match_config_volume(config_volume_prefix, v) if config_volume:", "%s' % config_volume) log.debug('puppet_tags %s' % puppet_tags) log.debug('manifest %s' %", "%s' % p) # Fire off processes to perform each", "processes to perform each configuration. 
Defaults # to the number", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "config_image) log.info('volumes %s' % volumes) # We key off of", "config_hash) log.debug(\"Updating config hash for %s, config_volume=%s hash=%s\" % (k,", "== 'true': log.debug('NET_HOST enabled') dcmd.extend(['--net', 'host', '--volume', '/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image) log.debug('Running", "% config_volume) if cmd_stdout: log.error(cmd_stdout) if cmd_stderr: log.error(cmd_stderr) else: if", "= True for returncode, config_volume in zip(returncodes, config_volumes): if returncode", "-0 --delay-updates --delete-after \\ --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \\", "if os.environ.get('NET_HOST', 'false') == 'true': log.debug('NET_HOST enabled') dcmd.extend(['--net', 'host', '--volume',", "example configuring all of the heat services # in a", "image: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name], stdout=subprocess.PIPE,", "FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp # Disables archiving if", "subprocess.Popen(['/usr/bin/docker', 'diff', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if", "# in a single container pass makes sense and will", "def short_hostname(): subproc = subprocess.Popen(['hostname', '-s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr", "a list of arguments for the above function # to", "TAGS=\"\" if [ -n \"$PUPPET_TAGS\" ]; then TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\" fi", "then rsync_srcs+=\" $d\" fi done rsync -a -R --delay-updates --delete-after", "the access data for the docker daemon. for k in", "shared settings together here. 
# # We key off of", "-a /tmp/puppet-etc/* /etc/puppet rm -Rf /etc/puppet/ssl # not in use", "log.debug('volumes %s' % volumes) sh_script = '/var/lib/docker-puppet/docker-puppet.sh' with open(sh_script, 'w')", "volume - we can't just use the # key as", "True for returncode, config_volume in zip(returncodes, config_volumes): if returncode !=", "{} for service in (json_data or []): if service is", "subproc = subprocess.Popen(['hostname', '-s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate()", "def pull_image(name): log.info('Pulling image: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker',", "config_volume) success = False # Update the startup configs with", "= service[0] or '' puppet_tags = service[1] or '' manifest", "os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG: %s' % config_file) with open(config_file) as f:", "system. p = multiprocessing.Pool(process_count) returncodes = list(p.map(mp_puppet_config, process_map)) config_volumes =", "cmd_stdout: log.debug(cmd_stdout) if cmd_stderr and \\ cmd_stderr != 'Error response", "= os.path.relpath( v.split(\":\")[0], prefix).split(\"/\")[0] break return config_volume def get_config_hash(prefix, config_volume):", "puppet_tags else: puppet_tags = \"file,file_line,concat,augeas\" process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])", "os.environ.get('DEBUG', False): log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) else: log.setLevel(logging.INFO) ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s", "off processes to perform each configuration. 
Defaults # to the", "log.info('manifest %s' % manifest) log.info('config_image %s' % config_image) log.info('volumes %s'", "can be used to generate config files or run ad-hoc", "= [ service.get('config_volume'), service.get('puppet_tags'), service.get('step_config'), service.get('config_image'), service.get('volumes', []), ] config_volume", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "echo \"{\\\\\"step\\\\\": $STEP}\" > /etc/puppet/hieradata/docker.json TAGS=\"\" if [ -n \"$PUPPET_TAGS\"", "return cmd_stdout.rstrip() def pull_image(name): log.info('Pulling image: %s' % name) subproc", "[ -n \"$PUPPET_TAGS\" ]; then TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\" fi # workaround", "'--volume', '%s:/etc/config.pp:ro' % tmp_man.name, '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',", "+ os.path.basename(infile)) with open(outfile, 'w') as out_f: json.dump(infile_data, out_f) if", "env=env) cmd_stdout, cmd_stderr = subproc.communicate() if subproc.returncode != 0: log.error('Failed", "\"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\" \"/var/www\") rsync_srcs=\"\" for d in \"${archivedirs[@]}\"; do if", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "| awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum fi \"\"\") with tempfile.NamedTemporaryFile()", "list(p.map(mp_puppet_config, process_map)) config_volumes = [pm[0] for pm in process_map] success", "service[4] if len(service) > 4 else [] if not manifest", "for a # given group of services. 
We are also", "permission errors echo \"{\\\\\"step\\\\\": $STEP}\" > /etc/puppet/hieradata/docker.json TAGS=\"\" if [", "len(service) > 4 else [] if puppet_tags: puppet_tags = \"file,file_line,concat,augeas,%s\"", "subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr =", "container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name], stdout=subprocess.PIPE,", "for debugging mkdir -p /var/lib/config-data/puppet-generated/${NAME} rsync -a -R -0 --delay-updates", "container pass makes sense and will save some time. #", "in zip(returncodes, config_volumes): if returncode != 0: log.error('ERROR configuring %s'", "together here. # # We key off of config_volume as", "KIND, either express or implied. See the # License for", "salt to trigger container restart when the config changes tar", "config_volume, '--env', 'HOSTNAME=%s' % short_hostname(), '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),", "-z \"$NO_ARCHIVE\" ]; then archivedirs=(\"/etc\" \"/root\" \"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\" \"/var/www\") rsync_srcs=\"\"", "do in deployed-server def short_hostname(): subproc = subprocess.Popen(['hostname', '-s'], stdout=subprocess.PIPE,", "files modified during puppet run # This is useful for", "# to the number of CPUs on the system. p", "puppet_tags, manifest, config_image, [volumes]] settings # that can be used", "settings # that can be used to generate config files", "[] if puppet_tags: puppet_tags = \"file,file_line,concat,augeas,%s\" % puppet_tags else: puppet_tags", "!= 0: log.error('Failed running docker-puppet.py for %s' % config_volume) if", "config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix) startup_configs =", "group of services. 
We are also now specifying the container", "%s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "daemon. for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()): env[k] =", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "use and causes permission errors echo \"{\\\\\"step\\\\\": $STEP}\" > /etc/puppet/hieradata/docker.json", "%s' % config_volume) if cmd_stdout: log.error(cmd_stdout) if cmd_stderr: log.error(cmd_stderr) else:", "% config_volume) success = False # Update the startup configs", "service log.info('Service compilation completed.') def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):", "all linearly we run them using a process # pool.", "same. configs = {} for service in (json_data or []):", "'No such container: {}\\n'.format(name): log.debug(cmd_stderr) process_count = int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count())) log.info('Running", "'/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/', # OpenSSL trusted CA injection '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',", "starting them all linearly we run them using a process", "log.debug('manifest %s' % manifest) log.debug('config_image %s' % config_image) log.debug('volumes %s'", "# Fire off processes to perform each configuration. Defaults #", "break return config_volume def get_config_hash(prefix, config_volume): hashfile = os.path.join(prefix, \"%s.md5sum\"", "settings together here. # # We key off of config_volume", "implied. See the # License for the specific language governing", "= service[3] or '' volumes = service[4] if len(service) >", "the given docker container image. 
# Uses the config file", "configs[config_volume][1] = '%s,%s' % (configs[config_volume][1], puppet_tags) if manifest: configs[config_volume][2] =", "process_map.append([config_volume, puppet_tags, manifest, config_image, volumes]) for p in process_map: log.debug('-", "TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\" fi # workaround LP1696283 mkdir -p /etc/ssh touch", "configs. if config_volume in configs: # Append puppet tags and", "Always copy the DOCKER_* environment variables as # they contain", "log.debug('puppet_tags %s' % puppet_tags) log.debug('manifest %s' % manifest) log.debug('config_image %s'", "if puppet_tags: puppet_tags = \"file,file_line,concat,augeas,%s\" % puppet_tags else: puppet_tags =", "> 4 else [] if puppet_tags: puppet_tags = \"file,file_line,concat,augeas,%s\" %", "'' manifest = service[2] or '' config_image = service[3] or", "can't just use the # key as e.g \"novacomute\" consumes", "log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) else: log.setLevel(logging.INFO) ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')", "int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count())) log.info('Running docker-puppet') config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG: %s'", "= multiprocessing.Pool(process_count) returncodes = list(p.map(mp_puppet_config, process_map)) config_volumes = [pm[0] for", "# This is useful for debugging mkdir -p /var/lib/config-data/puppet-generated/${NAME} rsync", "makes sense and will save some time. # To support", "run puppet inside of the given docker container image. 
#", "debugging mkdir -p /var/lib/config-data/puppet-generated/${NAME} rsync -a -R -0 --delay-updates --delete-after", "None: continue if isinstance(service, dict): service = [ service.get('config_volume'), service.get('puppet_tags'),", "\\\"$PUPPET_TAGS\\\"\" fi # workaround LP1696283 mkdir -p /etc/ssh touch /etc/ssh/ssh_known_hosts", "'%s:/etc/config.pp:ro' % tmp_man.name, '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume',", "# Append puppet tags and manifest. log.info(\"Existing service, appending puppet", "process_count = int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count())) log.info('Running docker-puppet') config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')", "config_file) with open(config_file) as f: json_data = json.load(f) # To", "This creates a list of arguments for the above function", "= env outfile = os.path.join(os.path.dirname(infile), \"hashed-\" + os.path.basename(infile)) with open(outfile,", "continue if isinstance(service, dict): service = [ service.get('config_volume'), service.get('puppet_tags'), service.get('step_config'),", "= \"file,file_line,concat,augeas\" process_map.append([config_volume, puppet_tags, manifest, config_image, volumes]) for p in", "obtain # a copy of the License at # #", "else [] if not manifest or not config_image: continue log.info('config_volume", "v in volumes: if v.startswith(prefix): config_volume = os.path.relpath( v.split(\":\")[0], prefix).split(\"/\")[0]", "for k, v in infile_data.iteritems(): config_volume = match_config_volume(config_volume_prefix, v) if", "pool. 
This creates a list of arguments for the above", "subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) def match_config_volume(prefix, config):", "\\ cmd_stderr != 'Error response from daemon: ' \\ 'No", "should be configured. This should match # in all instances", "hashfile = os.path.join(prefix, \"%s.md5sum\" % config_volume) hash_data = None if", "dcmd.append(config_image) log.debug('Running docker command: %s' % ' '.join(dcmd)) subproc =", "log.info(\"Adding new service\") configs[config_volume] = service log.info('Service compilation completed.') def", "to match what we do in deployed-server def short_hostname(): subproc", "'/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', # script injection '--volume', '%s:%s:rw'", "if v.startswith(prefix): config_volume = os.path.relpath( v.split(\":\")[0], prefix).split(\"/\")[0] break return config_volume", "name) subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr", "v.startswith(prefix): config_volume = os.path.relpath( v.split(\":\")[0], prefix).split(\"/\")[0] break return config_volume def", "def match_config_volume(prefix, config): # Match the mounted config volume -", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "config_image = service[3] or '' volumes = service[4] if len(service)", "docker command: %s' % ' '.join(dcmd)) subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,", "$1}' > /var/lib/config-data/${NAME}.md5sum fi \"\"\") with tempfile.NamedTemporaryFile() as tmp_man: with", "trigger container restart when the config changes tar -c -f", "dcmd.extend(['--net', 'host', '--volume', '/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image) log.debug('Running docker command: 
%s' %", "# this is to match what we do in deployed-server", "DOCKER_* environment variables as # they contain the access data", "the above function # to consume. process_map = [] for", "in volumes: if v.startswith(prefix): config_volume = os.path.relpath( v.split(\":\")[0], prefix).split(\"/\")[0] break", "else [] if puppet_tags: puppet_tags = \"file,file_line,concat,augeas,%s\" % puppet_tags else:", "of config_volume as this should be the same for a", "should match # in all instances where the volume name", "' '.join(dcmd)) subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) cmd_stdout, cmd_stderr", "[]) env.append(\"TRIPLEO_CONFIG_HASH=%s\" % config_hash) log.debug(\"Updating config hash for %s, config_volume=%s", "Fire off processes to perform each configuration. Defaults # to", "2.0 (the \"License\"); you may # not use this file", "f: hash_data = f.read().rstrip() return hash_data def rm_container(name): if os.environ.get('SHOW_DIFF',", "cmd_stdout: log.error(cmd_stdout) if cmd_stderr: log.error(cmd_stderr) else: if cmd_stdout: log.debug(cmd_stdout) if", "%s' % puppet_tags) log.debug('manifest %s' % manifest) log.debug('config_image %s' %", "{}\\n'.format(name): log.debug(cmd_stderr) process_count = int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count())) log.info('Running docker-puppet') config_file =", "by applicable law or agreed to in writing, software #", "running docker-puppet.py for %s' % config_volume) if cmd_stdout: log.error(cmd_stdout) if", "the system. p = multiprocessing.Pool(process_count) returncodes = list(p.map(mp_puppet_config, process_map)) config_volumes", "in which the services should be configured. 
This should match", "manifest = service[2] or '' config_image = service[3] or ''", "# To support this we merge shared settings together here.", "isinstance(service, dict): service = [ service.get('config_volume'), service.get('puppet_tags'), service.get('step_config'), service.get('config_image'), service.get('volumes',", "0755) script_file.write(\"\"\"#!/bin/bash set -ex mkdir -p /etc/puppet cp -a /tmp/puppet-etc/*", "a source for a JSON # array of [config_volume, puppet_tags,", "not in use and causes permission errors echo \"{\\\\\"step\\\\\": $STEP}\"", "puppet modules # inside of a container. import glob import", "of CPUs on the system. p = multiprocessing.Pool(process_count) returncodes =", "volumes)): log.debug('config_volume %s' % config_volume) log.debug('puppet_tags %s' % puppet_tags) log.debug('manifest", "match_config_volume(config_volume_prefix, v) if config_volume: config_hash = get_config_hash(config_volume_prefix, config_volume) if config_hash:", "mkdir -p /etc/ssh touch /etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose", "service[0] or '' puppet_tags = service[1] or '' manifest =", "multiprocessing.Pool(process_count) returncodes = list(p.map(mp_puppet_config, process_map)) config_volumes = [pm[0] for pm", "restart when the config changes tar -c -f - /var/lib/config-data/${NAME}", "/var/lib/docker-puppet/docker-puppet.json as a source for a JSON # array of", "cmd_stdout, cmd_stderr = subproc.communicate() return cmd_stdout.rstrip() def pull_image(name): log.info('Pulling image:", "a # given group of services. We are also now", "the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a", "applicable law or agreed to in writing, software # distributed", "sh_script = '/var/lib/docker-puppet/docker-puppet.sh' with open(sh_script, 'w') as script_file: os.chmod(script_file.name, 0755)", "services at the same # time. 
For example configuring all", "new service\") configs[config_volume] = service log.info('Service compilation completed.') def mp_puppet_config((config_volume,", "# time. For example configuring all of the heat services", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "log = logging.getLogger() ch = logging.StreamHandler(sys.stdout) if os.environ.get('DEBUG', False): log.setLevel(logging.DEBUG)", "service.get('config_image'), service.get('volumes', []), ] config_volume = service[0] or '' puppet_tags", "'' puppet_tags = service[1] or '' manifest = service[2] or", "or '' config_image = service[3] or '' volumes = service[4]", "cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) log.info('Removing container: %s' % name)", "puppet_tags: puppet_tags = \"file,file_line,concat,augeas,%s\" % puppet_tags else: puppet_tags = \"file,file_line,concat,augeas\"", "consume. process_map = [] for config_volume in configs: service =", "with tempfile.NamedTemporaryFile() as tmp_man: with open(tmp_man.name, 'w') as man_file: man_file.write('include", "subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr and \\ cmd_stderr !=", "'shared' services at the same # time. 
For example configuring", "log.info('volumes %s' % volumes) # We key off of config", "os.path.isfile(hashfile): with open(hashfile) as f: hash_data = f.read().rstrip() return hash_data", "log.debug('NET_HOST enabled') dcmd.extend(['--net', 'host', '--volume', '/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image) log.debug('Running docker command:", "This should match # in all instances where the volume", "if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) log.info('Removing container: %s' %", "LP1696283 mkdir -p /etc/ssh touch /etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply", "open(outfile, 'w') as out_f: json.dump(infile_data, out_f) if not success: sys.exit(1)", "or run ad-hoc puppet modules # inside of a container.", "# License for the specific language governing permissions and limitations", "as script_file: os.chmod(script_file.name, 0755) script_file.write(\"\"\"#!/bin/bash set -ex mkdir -p /etc/puppet", "if cmd_stderr and \\ cmd_stderr != 'Error response from daemon:", "archivedirs=(\"/etc\" \"/root\" \"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\" \"/var/www\") rsync_srcs=\"\" for d in \"${archivedirs[@]}\";", "key as e.g \"novacomute\" consumes config-data/nova volumes = config.get('volumes', [])", "for v in volumes: if v.startswith(prefix): config_volume = os.path.relpath( v.split(\":\")[0],", "puppet_tags, '--env', 'NAME=%s' % config_volume, '--env', 'HOSTNAME=%s' % short_hostname(), '--env',", "man_file.write('include ::tripleo::packages\\n') man_file.write(manifest) rm_container('docker-puppet-%s' % config_volume) pull_image(config_image) dcmd = ['/usr/bin/docker',", "config_image, [volumes]] settings # that can be used to generate", "if [ -d \"$d\" ]; then rsync_srcs+=\" $d\" fi done", "if puppet_tags: configs[config_volume][1] = '%s,%s' % (configs[config_volume][1], puppet_tags) if manifest:", "configs with the config hash we generated above config_volume_prefix =", 
"docker-puppet.py for %s' % config_volume) if cmd_stdout: log.error(cmd_stdout) if cmd_stderr:", "where the volume name is also the same. configs =", "response from daemon: ' \\ 'No such container: {}\\n'.format(name): log.debug(cmd_stderr)", "# To save time we support configuring 'shared' services at", "for returncode, config_volume in zip(returncodes, config_volumes): if returncode != 0:", "[]), ] config_volume = service[0] or '' puppet_tags = service[1]", "log.debug(cmd_stderr) log.info('Removing container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'rm',", "p) # Fire off processes to perform each configuration. Defaults", "array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings # that", "License. You may obtain # a copy of the License", "env[k] = os.environ.get(k) if os.environ.get('NET_HOST', 'false') == 'true': log.debug('NET_HOST enabled')", "ANY KIND, either express or implied. See the # License", "subprocess.Popen(['/usr/bin/docker', 'pull', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if", "time. For example configuring all of the heat services #", "key off of config_volume as this should be the same", "arguments for the above function # to consume. process_map =", "volume name is also the same. configs = {} for", "tmp_man.name, '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/', #", "/etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp # Disables", "as a source for a JSON # array of [config_volume,", "for the above function # to consume. 
process_map = []", "formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s') ch.setFormatter(formatter) log.addHandler(ch) # this is", "log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) def match_config_volume(prefix, config): # Match the", "files or run ad-hoc puppet modules # inside of a", "cmd_stdout, cmd_stderr = subproc.communicate() if subproc.returncode != 0: log.error('Failed running", "% (k, config_volume, config_hash)) infile_data[k]['environment'] = env outfile = os.path.join(os.path.dirname(infile),", "or implied. See the # License for the specific language" ]
[ "Gecko/20100101 ' 'Firefox/47.0 FirePHP/0.7.4', 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32' }) def get_url(**kwargs): \"\"\"", "is_folder = False # Add our item to the Kodi", "# Add the list item to a virtual Kodi folder.", "content. It allows Kodi to select appropriate views # for", "is_folder = False means that this item won't open any", "False means that this item won't open any sub-list. is_folder", "provided paramstring :param paramstring: URL encoded plugin paramstring :type paramstring:", "we use the same image for all items for simplicity's", "'icon': \"icon.png\", 'fanart': \"icon.png\"}) # Set additional info for the", "etc.) from some site or server. .. note:: Consider using", "import urllib import urlparse # import xbmc import xbmcgui import", "# Create a URL for a plugin recursive call. #", "from main cable.\") elif params['category'] == \"movies\": ATV.update_aci_movies() print(\"Updated from", "item with a path to play. play_item = xbmcgui.ListItem(path=path) #", "video files/streams. Here you can insert some parsing code that", "if params: if params['action'] == 'listing': # Load the videos", "of keyword arguments. :param kwargs: \"argument=value\" pairs :type kwargs: dict", "Here you can insert some parsing code that retrieves the", "is called from Kodi UI without any parameters, # display", "plugin://plugin.video.example/?action=play& # video=[video url] url = get_url(action='play', video=video_item['url']) # video_url", "from a provided URL. play_video(params['video']) else: # If the provided", "'genre': category.title(), 'mediatype': 'video'}) # Set graphics (thumbnail, fanart, banner,", "-*- coding: utf-8 -*- import sys import urllib import urlparse", "'Documentaries' etc.) from some site or server. .. note:: Consider", "+ \\ # urllib.quote_plus(video['reference']) # url = get_url(action='play', video=video_url) #", "video_item = videos[video_id] # Create a list item with a", "that this item won't open any sub-list. 
is_folder = False", "raise an exception. This helps to catch coding errors, #", "property to 'true'. # This is mandatory for playable items!", "through each video. for video_id in videos: # Get the", "provided category. list_videos(params['category']) elif params['action'] == 'play': # Play a", "Category name :type category: str :return: the list of videos", ":type kwargs: dict :return: plugin call URL :rtype: str \"\"\"", ".. note:: Consider using `generators functions <https://wiki.python.org/moin/Generators>`_ instead of returning", "\"\"\" Create the list of playable videos in the Kodi", "Check the parameters passed to the plugin if params: if", "WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \\ # 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \\", "correctly. list_item.setInfo('video', {'title': video_item[\"title\"], 'genre': category.title(), 'mediatype': 'video'}) # Set", "{'title': category.title(), 'genre': category.title(), 'mediatype': 'video'}) # Create a URL", "{0}!'.format(paramstring)) else: # Load ATV. ATV.load_aci() # If the plugin", "# Set plugin category. It is displayed in some skins", "print(\"Updated from main movies.\") # Display the list of videos", "notation. _url = sys.argv[0] # Get the plugin handle as", "url = get_url(action=\"listing\", category=category) # is_folder = True means that", "# Play with inputstream addon. play_item.setProperty('inputstreamaddon', 'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type', 'hls') #", "Category name :type category: str \"\"\" # Set plugin category.", "as an integer number. _handle = int(sys.argv[1]) # Get an", "Get video categories categories = get_categories() # Iterate through categories", "the name # of the current section. 
xbmcplugin.setPluginCategory(_handle, category) #", "list_item.setInfo('video', {'title': video_item[\"title\"], 'genre': category.title(), 'mediatype': 'video'}) # Set graphics", "video=video_url) # Add the list item to a virtual Kodi", "list_videos(params['category']) elif params['action'] == 'play': # Play a video from", "accordingly. list_item.setArt({'thumb': \"icon.png\", 'icon': \"icon.png\", 'fanart': \"icon.png\"}) # Set additional", "\\ # urllib.quote_plus(video['reference']) # url = get_url(action='play', video=video_url) # Add", "through categories for category in categories: # xbmc.log(category.encode(\"utf-8\"), xbmc.LOGNOTICE) #", "plugin call URL :rtype: str \"\"\" return '{0}?{1}'.format(_url, urllib.urlencode(kwargs)) def", "Iterate through categories for category in categories: # xbmc.log(category.encode(\"utf-8\"), xbmc.LOGNOTICE)", "info for the list item. # 'mediatype' is needed for", "referer_header) # Create a URL for a plugin recursive call.", "plugin paramstring :type paramstring: str \"\"\" # Parse a URL-encoded", "image accordingly. list_item.setArt({'thumb': video_item[\"thumbnail\"], 'icon': video_item[\"thumbnail\"], 'fanart': video_item[\"thumbnail\"] }) #", "virtual folder listing. xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder) # Add a", "categories. Here you can insert some parsing code that retrieves", "playable items! list_item.setProperty('IsPlayable', 'true') referer_header = urllib.urlencode({\"Referer\": video_item[\"location\"]}) video_item['url'] +=", "router function and pass the plugin call parameters to it.", "sys import urllib import urlparse # import xbmc import xbmcgui", "URL. play_video(params['video']) else: # If the provided paramstring does not", "image. list_item = xbmcgui.ListItem(label=category.title()) # Set graphics (thumbnail, fanart, banner,", "str \"\"\" # Create a playable item with a path", "information for an item. # For available properties see the", "site or server. .. 
note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_", "= get_url(action=\"listing\", category=category) # is_folder = True means that this", "sake. # In a real-life plugin you need to set", "If the provided paramstring does not contain a supported action", "return ATV.aci.iterkeys() def get_videos(category): \"\"\" Get the list of video", "list of video categories in the Kodi interface. \"\"\" #", "to it. # We use string slicing to trim the", "a category name for both properties for for simplicity's sake.", ":return: the list of videos in the category :rtype: list", "URL-encoded paramstring to the dictionary of # {<parameter>: <value>} elements", "aci # Get the plugin url in plugin:// notation. _url", "= xbmcgui.ListItem(path=path) # Play with inputstream addon. play_item.setProperty('inputstreamaddon', 'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type',", "the list of videos in the category. videos = get_videos(category)", "from some site or server. .. note:: Consider using `generator", "ATV.load_aci() # Encode user agent headers for video. user_agent_headers =", "xbmcplugin.setContent(_handle, 'videos') # Get video categories categories = get_categories() #", "'icon': video_item[\"thumbnail\"], 'fanart': video_item[\"thumbnail\"] }) # Set 'IsPlayable' property to", "the plugin url in plugin:// notation. _url = sys.argv[0] #", "\\ # '&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \\ # '&amp;|User-Agent=Mozilla/5.0 (Windows", "'|%s&amp;%s' % (user_agent_headers, referer_header) # Create a URL for a", ":param category: Category name :type category: str \"\"\" # Set", "= True # Add our item to the Kodi virtual", "+= '|%s&amp;%s' % (user_agent_headers, referer_header) # Create a URL for", "the Kodi interface. :param category: Category name :type category: str", "for aci. if params['category'] == \"shows\": ATV.update_aci_shows() print(\"Updated from main", "section. 
xbmcplugin.setPluginCategory(_handle, category) # Set plugin content. It allows Kodi", "ATV.load_aci() # If the plugin is called from Kodi UI", "play_item.setProperty('inputstream.adaptive.manifest_type', 'hls') # Pass the item to the Kodi player.", "for this ListItem correctly. list_item.setInfo('video', {'title': video_item[\"title\"], 'genre': category.title(), 'mediatype':", "in a provided category. list_videos(params['category']) elif params['action'] == 'play': #", "category) # Set plugin content. It allows Kodi to select", "list_item = xbmcgui.ListItem(label=video_item[\"title\"]) # Set additional info for the list", "# Example: plugin://plugin.video.example/?action=listing&category=[category name] url = get_url(action=\"listing\", category=category) # is_folder", "# 'mediatype' is needed for skin to display info for", "to the plugin if params: if params['action'] == 'listing': #", "playable item with a path to play. play_item = xbmcgui.ListItem(path=path)", "this item won't open any sub-list. is_folder = False #", "for video. user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64;", "calling the plugin recursively from the given set of keyword", "for this type of content. xbmcplugin.setContent(_handle, 'videos') # Get video", "video URL :type path: str \"\"\" # Create a playable", "to set each image accordingly. list_item.setArt({'thumb': video_item[\"thumbnail\"], 'icon': video_item[\"thumbnail\"], 'fanart':", "other functions depending on the provided paramstring :param paramstring: URL", "xbmc.LOGNOTICE) # Create a list item with a text label", "to set various information for an item. # For available", "Kodi player. xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item) def router(paramstring): \"\"\" Router function", "display info for this ListItem correctly. 
list_item.setInfo('video', {'title': category.title(), 'genre':", "urlparse # import xbmc import xbmcgui import xbmcplugin import aci", "dictionary of # {<parameter>: <value>} elements params = dict(urlparse.parse_qsl(paramstring)) #", "get_url(action='play', video=video_url) # Add the list item to a virtual", "False # Add our item to the Kodi virtual folder", "video categories. Here you can insert some parsing code that", "item to the Kodi virtual folder listing. xbmcplugin.addDirectoryItem(_handle, url, list_item,", "xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def list_videos(category):", "name # of the current section. xbmcplugin.setPluginCategory(_handle, category) # Set", "ATV.aci.iterkeys() def get_videos(category): \"\"\" Get the list of video files/streams.", "parsing code that retrieves the list of video streams in", "category: Category name :type category: str :return: the list of", "of playable videos in the Kodi interface. :param category: Category", "to display info for this ListItem correctly. list_item.setInfo('video', {'title': category.title(),", "= xbmcgui.ListItem(label=video_item[\"title\"]) # Set additional info for the list item.", "{<parameter>: <value>} elements params = dict(urlparse.parse_qsl(paramstring)) # Check the parameters", "files/streams. Here you can insert some parsing code that retrieves", "# Call the router function and pass the plugin call", "an exception. This helps to catch coding errors, # e.g.", "' \\ # 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \\ # urllib.quote_plus(video['reference']) # url", "passed to the plugin if params: if params['action'] == 'listing':", "url, list_item, is_folder) # Add a sort method for the", "all items for simplicity's sake. # In a real-life plugin", "to catch coding errors, # e.g. 
typos in action names.", "It is displayed in some skins as the name #", "category.title(), 'genre': category.title(), 'mediatype': 'video'}) # Create a URL for", "# Set additional info for the list item. # Here", "note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists.", "folder listing. xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder) # Add a sort", "the virtual folder items (alphabetically, ignore articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) #", "listitem=play_item) def router(paramstring): \"\"\" Router function that calls other functions", "Here we use a category name for both properties for", "Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the", "content. xbmcplugin.setContent(_handle, 'videos') # Get the list of videos in", "name for both properties for for simplicity's sake. # setInfo", "aci. if params['category'] == \"shows\": ATV.update_aci_shows() print(\"Updated from main shows.\")", "xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item) def router(paramstring): \"\"\" Router function that calls", "(Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 ' 'Firefox/47.0 FirePHP/0.7.4', 'X-Requested-With':", "means that this item opens a sub-list of lower level", "print(\"Updated from main cable.\") elif params['category'] == \"movies\": ATV.update_aci_movies() print(\"Updated", "it. # We use string slicing to trim the leading", "Create a URL for a plugin recursive call. # Example:", "params['action'] == 'listing': # Load the videos for aci. if", "list item. # Here we use a category name for", ":return: plugin call URL :rtype: str \"\"\" return '{0}?{1}'.format(_url, urllib.urlencode(kwargs))", "in the given category from some site or server. ..", "categories: # xbmc.log(category.encode(\"utf-8\"), xbmc.LOGNOTICE) # Create a list item with", "play. play_item = xbmcgui.ListItem(path=path) # Play with inputstream addon. 
play_item.setProperty('inputstreamaddon',", "functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. :param category: Category name", "some site or server. .. note:: Consider using `generators functions", "play_item = xbmcgui.ListItem(path=path) # Play with inputstream addon. play_item.setProperty('inputstreamaddon', 'inputstream.adaptive')", "ATV.update_aci_shows() print(\"Updated from main shows.\") elif params['category'] == \"cable\": ATV.update_aci_cable()", "\"\"\" return '{0}?{1}'.format(_url, urllib.urlencode(kwargs)) def get_categories(): \"\"\" Get the list", "\"\"\" Create the list of video categories in the Kodi", "Pass the item to the Kodi player. xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)", "the following link: # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 # 'mediatype' is needed for", "plugin recursive call. # Example: plugin://plugin.video.example/?action=play& # video=[video url] url", "content. xbmcplugin.setContent(_handle, 'videos') # Get video categories categories = get_categories()", "provided paramstring does not contain a supported action # we", "# Create a playable item with a path to play.", "the parameters passed to the plugin if params: if params['action']", "aci.ACI() ATV.load_aci() # Encode user agent headers for video. user_agent_headers", "type of content. xbmcplugin.setContent(_handle, 'videos') # Get the list of", "rv:47.0) Gecko/20100101 ' 'Firefox/47.0 FirePHP/0.7.4', 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32' }) def get_url(**kwargs):", "a path to play. play_item = xbmcgui.ListItem(path=path) # Play with", "the plugin recursively from the given set of keyword arguments.", "'true'. # This is mandatory for playable items! list_item.setProperty('IsPlayable', 'true')", "image accordingly. list_item.setArt({'thumb': \"icon.png\", 'icon': \"icon.png\", 'fanart': \"icon.png\"}) # Set", "item. 
# For available properties see the following link: #", "xbmcplugin.setPluginCategory(_handle, 'ACI') # Set plugin content. It allows Kodi to", "= sys.argv[0] # Get the plugin handle as an integer", "video categories in the Kodi interface. \"\"\" # Set plugin", "utf-8 -*- import sys import urllib import urlparse # import", "virtual folder. xbmcplugin.endOfDirectory(_handle) def play_video(path): \"\"\" Play a video by", "list of video files/streams. Here you can insert some parsing", "from Kodi UI without any parameters, # display the list", "Example: plugin://plugin.video.example/?action=play& # video=[video url] url = get_url(action='play', video=video_item['url']) #", "plugin is called from Kodi UI without any parameters, #", "'__main__': # Call the router function and pass the plugin", "(thumbnail, fanart, banner, poster, landscape etc.) for the list item.", "items (alphabetically, ignore articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating a", "# Add a sort method for the virtual folder items", "use the same image for all items for simplicity's sake.", "category.title(), 'mediatype': 'video'}) # Create a URL for a plugin", "'IsPlayable' property to 'true'. # This is mandatory for playable", "Gecko/20100101 Firefox/47.0 ' \\ # 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \\ # urllib.quote_plus(video['reference'])", "main cable.\") elif params['category'] == \"movies\": ATV.update_aci_movies() print(\"Updated from main", "xbmcgui import xbmcplugin import aci # Get the plugin url", "\"\"\" Get the list of video categories. Here you can", "a provided category. list_videos(params['category']) elif params['action'] == 'play': # Play", "the list of videos in a provided category. list_videos(params['category']) elif", "as the name # of the current section. 
xbmcplugin.setPluginCategory(_handle, 'ACI')", "if __name__ == '__main__': # Call the router function and", "some skins as the name # of the current section.", "category=category) # is_folder = True means that this item opens", "Example: plugin://plugin.video.example/?action=listing&category=[category name] url = get_url(action=\"listing\", category=category) # is_folder =", "videos for aci. if params['category'] == \"shows\": ATV.update_aci_shows() print(\"Updated from", "video_id in videos: # Get the video item to process.", "item. # Here we use the same image for all", "the list of video files/streams. Here you can insert some", "of video streams in the given category from some site", "recursively from the given set of keyword arguments. :param kwargs:", "interface. :param category: Category name :type category: str \"\"\" #", "list of playable videos in the Kodi interface. :param category:", "any parameters, # display the list of video categories list_categories()", "each image accordingly. list_item.setArt({'thumb': video_item[\"thumbnail\"], 'icon': video_item[\"thumbnail\"], 'fanart': video_item[\"thumbnail\"] })", "we use a category name for both properties for for", "list of video categories. Here you can insert some parsing", "section. xbmcplugin.setPluginCategory(_handle, 'ACI') # Set plugin content. It allows Kodi", "server. .. note:: Consider using `generators functions <https://wiki.python.org/moin/Generators>`_ instead of", "a virtual Kodi folder. # is_folder = False means that", "with a text label and a thumbnail image. list_item =", "the list of video categories list_categories() if __name__ == '__main__':", "str \"\"\" # Parse a URL-encoded paramstring to the dictionary", "set each image accordingly. 
list_item.setArt({'thumb': \"icon.png\", 'icon': \"icon.png\", 'fanart': \"icon.png\"})", "print(\"Updated from main shows.\") elif params['category'] == \"cable\": ATV.update_aci_cable() print(\"Updated", "import aci # Get the plugin url in plugin:// notation.", "URL for calling the plugin recursively from the given set", "name :type category: str \"\"\" # Set plugin category. It", "xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def play_video(path):", "parsing code that retrieves the list of video categories (e.g.", "for the list item. # 'mediatype' is needed for skin", "text label and a thumbnail image. list_item = xbmcgui.ListItem(label=category.title()) #", "for the virtual folder items (alphabetically, ignore articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)", "paramstring: {0}!'.format(paramstring)) else: # Load ATV. ATV.load_aci() # If the", "string slicing to trim the leading '?' from the plugin", "won't open any sub-list. is_folder = False # Add our", "rv:47.0) Gecko/20100101 Firefox/47.0 ' \\ # 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \\ #", "real-life plugin you need to set each image accordingly. list_item.setArt({'thumb':", "a URL for a plugin recursive call. # Example: plugin://plugin.video.example/?action=play&", "user agent headers for video. user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows", "streams in the given category from some site or server.", "In a real-life plugin you need to set each image", "# Add our item to the Kodi virtual folder listing.", "item to process. video_item = videos[video_id] # Create a list", "# we raise an exception. This helps to catch coding", "set of keyword arguments. :param kwargs: \"argument=value\" pairs :type kwargs:", "folder. # is_folder = False means that this item won't", "image for all items for simplicity's sake. 
# In a", "import xbmcgui import xbmcplugin import aci # Get the plugin", "# https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 # 'mediatype' is needed for a skin to", "to play. play_item = xbmcgui.ListItem(path=path) # Play with inputstream addon.", "xbmc.log(category.encode(\"utf-8\"), xbmc.LOGNOTICE) # Create a list item with a text", "# Encode user agent headers for video. user_agent_headers = urllib.urlencode({'User-Agent':", "https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 # 'mediatype' is needed for a skin to display", "articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle)", "# This is mandatory for playable items! list_item.setProperty('IsPlayable', 'true') referer_header", ":type category: str :return: the list of videos in the", "of video files/streams. Here you can insert some parsing code", "to a virtual Kodi folder. # is_folder = False means", "= False # Add our item to the Kodi virtual", "'TV-shows', 'Documentaries' etc.) from some site or server. .. note::", "info for this ListItem correctly. list_item.setInfo('video', {'title': category.title(), 'genre': category.title(),", "e.g. typos in action names. raise ValueError('Invalid paramstring: {0}!'.format(paramstring)) else:", "# Load ATV. ATV.load_aci() # If the plugin is called", "\"icon.png\", 'icon': \"icon.png\", 'fanart': \"icon.png\"}) # Set additional info for", "'&amp;|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \\", "a URL for calling the plugin recursively from the given", "playable videos in the Kodi interface. :param category: Category name", "raise ValueError('Invalid paramstring: {0}!'.format(paramstring)) else: # Load ATV. ATV.load_aci() #", "video. 
for video_id in videos: # Get the video item", "some parsing code that retrieves the list of video streams", "We use string slicing to trim the leading '?' from", "url] url = get_url(action='play', video=video_item['url']) # video_url = 'plugin://plugin.video.f4mTester/?url=' +", "the current section. xbmcplugin.setPluginCategory(_handle, 'ACI') # Set plugin content. It", "# is_folder = False means that this item won't open", "a thumbnail image. list_item = xbmcgui.ListItem(label=category.title()) # Set graphics (thumbnail,", "\"icon.png\"}) # Set additional info for the list item. #", "video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \\ # '&amp;streamtype=HLSRETRY&name=' +", "'hls') # Pass the item to the Kodi player. xbmcplugin.setResolvedUrl(_handle,", "= xbmcgui.ListItem(label=category.title()) # Set graphics (thumbnail, fanart, banner, poster, landscape", "a real-life plugin you need to set each image accordingly.", "'mediatype': 'video'}) # Create a URL for a plugin recursive", "# If the plugin is called from Kodi UI without", "'videos') # Get the list of videos in the category.", "of content. xbmcplugin.setContent(_handle, 'videos') # Get the list of videos", "current section. xbmcplugin.setPluginCategory(_handle, 'ACI') # Set plugin content. It allows", "router(paramstring): \"\"\" Router function that calls other functions depending on", "site or server. .. note:: Consider using `generators functions <https://wiki.python.org/moin/Generators>`_", "a text label and a thumbnail image. list_item = xbmcgui.ListItem(label=video_item[\"title\"])", "# is_folder = True means that this item opens a", "import xbmcplugin import aci # Get the plugin url in", "`generators functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. 
:param category: Category", "in some skins as the name # of the current", "kwargs: dict :return: plugin call URL :rtype: str \"\"\" return", "return '{0}?{1}'.format(_url, urllib.urlencode(kwargs)) def get_categories(): \"\"\" Get the list of", "urllib.quote_plus(video['name']) + \\ # '&amp;|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0)", "Play a video by the provided path. :param path: Fully-qualified", "if params['category'] == \"shows\": ATV.update_aci_shows() print(\"Updated from main shows.\") elif", "play_item.setProperty('inputstreamaddon', 'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type', 'hls') # Pass the item to the", "# Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def play_video(path): \"\"\"", "get_categories(): \"\"\" Get the list of video categories. Here you", "'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \\ # urllib.quote_plus(video['reference']) # url = get_url(action='play', video=video_url)", "plugin:// notation. _url = sys.argv[0] # Get the plugin handle", "display info for this ListItem correctly. list_item.setInfo('video', {'title': video_item[\"title\"], 'genre':", "params['category'] == \"movies\": ATV.update_aci_movies() print(\"Updated from main movies.\") # Display", "= int(sys.argv[1]) # Get an instance of ACI. ATV =", "% (user_agent_headers, referer_header) # Create a URL for a plugin", "URL for a plugin recursive call. # Example: plugin://plugin.video.example/?action=play& #", ".. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_ instead of returning", "ValueError('Invalid paramstring: {0}!'.format(paramstring)) else: # Load ATV. 
ATV.load_aci() # If", "def router(paramstring): \"\"\" Router function that calls other functions depending", "# For available properties see the following link: # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14", "== \"movies\": ATV.update_aci_movies() print(\"Updated from main movies.\") # Display the", "provided URL. play_video(params['video']) else: # If the provided paramstring does", "of content. xbmcplugin.setContent(_handle, 'videos') # Get video categories categories =", "code that retrieves the list of video streams in the", "+ \\ # '&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \\ # '&amp;|User-Agent=Mozilla/5.0", "not contain a supported action # we raise an exception.", "Get the plugin url in plugin:// notation. _url = sys.argv[0]", "accordingly. list_item.setArt({'thumb': video_item[\"thumbnail\"], 'icon': video_item[\"thumbnail\"], 'fanart': video_item[\"thumbnail\"] }) # Set", "the same image for all items for simplicity's sake. #", "paramstring: URL encoded plugin paramstring :type paramstring: str \"\"\" #", "ATV = aci.ACI() ATV.load_aci() # Encode user agent headers for", "dict(urlparse.parse_qsl(paramstring)) # Check the parameters passed to the plugin if", "returning lists. :return: The list of video categories :rtype: types.GeneratorType", "categories list_categories() if __name__ == '__main__': # Call the router", "Create a playable item with a path to play. play_item", "to the Kodi player. xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item) def router(paramstring): \"\"\"", "depending on the provided paramstring :param paramstring: URL encoded plugin", "pairs :type kwargs: dict :return: plugin call URL :rtype: str", "both properties for for simplicity's sake. # setInfo allows to", "\"movies\": ATV.update_aci_movies() print(\"Updated from main movies.\") # Display the list", "our item to the Kodi virtual folder listing. 
xbmcplugin.addDirectoryItem(_handle, url,", "item to a virtual Kodi folder. # is_folder = False", "plugin recursively from the given set of keyword arguments. :param", "Set plugin content. It allows Kodi to select appropriate views", "lower level items. is_folder = True # Add our item", "elif params['category'] == \"movies\": ATV.update_aci_movies() print(\"Updated from main movies.\") #", "of video categories. Here you can insert some parsing code", "Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def list_videos(category): \"\"\" Create", "urllib.urlencode({\"Referer\": video_item[\"location\"]}) video_item['url'] += '|%s&amp;%s' % (user_agent_headers, referer_header) # Create", "a sub-list of lower level items. is_folder = True #", "the video item to process. video_item = videos[video_id] # Create", "'video'}) # Create a URL for a plugin recursive call.", "display the list of video categories list_categories() if __name__ ==", "category: Category name :type category: str \"\"\" # Set plugin", "'X-Requested-With': 'ShockwaveFlash/2172.16.17.32' }) def get_url(**kwargs): \"\"\" Create a URL for", "the category :rtype: list \"\"\" return ATV.aci[category] def list_categories(): \"\"\"", "plugin handle as an integer number. _handle = int(sys.argv[1]) #", "Set plugin category. It is displayed in some skins as", "Load ATV. ATV.load_aci() # If the plugin is called from", "you need to set each image accordingly. list_item.setArt({'thumb': \"icon.png\", 'icon':", "path: Fully-qualified video URL :type path: str \"\"\" # Create", "that calls other functions depending on the provided paramstring :param", "helps to catch coding errors, # e.g. typos in action", "does not contain a supported action # we raise an", "video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.) 
from some site", "'mediatype' is needed for skin to display info for this", "(user_agent_headers, referer_header) # Create a URL for a plugin recursive", "views # for this type of content. xbmcplugin.setContent(_handle, 'videos') #", "xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def", "some site or server. .. note:: Consider using `generator functions", "calls other functions depending on the provided paramstring :param paramstring:", "see the following link: # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 # 'mediatype' is needed", "the given category from some site or server. .. note::", "retrieves the list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries'", "plugin recursive call. # Example: plugin://plugin.video.example/?action=listing&category=[category name] url = get_url(action=\"listing\",", "\"\"\" Get the list of video files/streams. Here you can", "the provided paramstring does not contain a supported action #", "of ACI. ATV = aci.ACI() ATV.load_aci() # Encode user agent", "for all items for simplicity's sake. # In a real-life", "various information for an item. # For available properties see", "NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \\ # 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer='", "def get_categories(): \"\"\" Get the list of video categories. Here", "Encode user agent headers for video. user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0", "\\ # '&amp;|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0", "# url = get_url(action='play', video=video_url) # Add the list item", "virtual folder items (alphabetically, ignore articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish", "image. 
list_item = xbmcgui.ListItem(label=video_item[\"title\"]) # Set additional info for the", "correctly. list_item.setInfo('video', {'title': category.title(), 'genre': category.title(), 'mediatype': 'video'}) # Create", "of the current section. xbmcplugin.setPluginCategory(_handle, 'ACI') # Set plugin content.", "get_categories() # Iterate through categories for category in categories: #", "= get_url(action='play', video=video_url) # Add the list item to a", "= urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 '", "of video categories in the Kodi interface. \"\"\" # Set", "number. _handle = int(sys.argv[1]) # Get an instance of ACI.", ":return: The list of video categories :rtype: types.GeneratorType \"\"\" return", "retrieves the list of video streams in the given category", "names. raise ValueError('Invalid paramstring: {0}!'.format(paramstring)) else: # Load ATV. ATV.load_aci()", "name :type category: str :return: the list of videos in", "functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. :return: The list of", "for simplicity's sake. # setInfo allows to set various information", "# Set 'IsPlayable' property to 'true'. # This is mandatory", "\\ # 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \\ # urllib.quote_plus(video['reference']) # url =", "\"\"\" Play a video by the provided path. :param path:", "is_folder = True means that this item opens a sub-list", "use a category name for both properties for for simplicity's", "paramstring: str \"\"\" # Parse a URL-encoded paramstring to the", "in the Kodi interface. :param category: Category name :type category:", "\"\"\" return ATV.aci.iterkeys() def get_videos(category): \"\"\" Get the list of", "in the category. 
videos = get_videos(category) # Iterate through each", "category in categories: # xbmc.log(category.encode(\"utf-8\"), xbmc.LOGNOTICE) # Create a list", "_handle = int(sys.argv[1]) # Get an instance of ACI. ATV", "need to set each image accordingly. list_item.setArt({'thumb': \"icon.png\", 'icon': \"icon.png\",", "paramstring :type paramstring: str \"\"\" # Parse a URL-encoded paramstring", "a plugin recursive call. # Example: plugin://plugin.video.example/?action=play& # video=[video url]", "supported action # we raise an exception. This helps to", "landscape etc.) for the list item. # Here we use", "\"\"\" Create a URL for calling the plugin recursively from", "list of video categories :rtype: types.GeneratorType \"\"\" return ATV.aci.iterkeys() def", "URL for a plugin recursive call. # Example: plugin://plugin.video.example/?action=listing&category=[category name]", "catch coding errors, # e.g. typos in action names. raise", "coding: utf-8 -*- import sys import urllib import urlparse #", "level items. is_folder = True # Add our item to", "# of the current section. xbmcplugin.setPluginCategory(_handle, category) # Set plugin", "Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def play_video(path): \"\"\" Play", "def list_categories(): \"\"\" Create the list of video categories in", "encoded plugin paramstring :type paramstring: str \"\"\" # Parse a", "'Movies', 'TV-shows', 'Documentaries' etc.) from some site or server. ..", "category from some site or server. .. note:: Consider using", "called from Kodi UI without any parameters, # display the", "ATV.update_aci_movies() print(\"Updated from main movies.\") # Display the list of", "plugin url in plugin:// notation. _url = sys.argv[0] # Get", "for this type of content. xbmcplugin.setContent(_handle, 'videos') # Get the", "list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.) from", "list of video categories list_categories() if __name__ == '__main__': #", "addon. 
play_item.setProperty('inputstreamaddon', 'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type', 'hls') # Pass the item to", "name] url = get_url(action=\"listing\", category=category) # is_folder = True means", "properties for for simplicity's sake. # setInfo allows to set", "the list item to a virtual Kodi folder. # is_folder", "the given set of keyword arguments. :param kwargs: \"argument=value\" pairs", "a thumbnail image. list_item = xbmcgui.ListItem(label=video_item[\"title\"]) # Set additional info", "path to play. play_item = xbmcgui.ListItem(path=path) # Play with inputstream", "properties see the following link: # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 # 'mediatype' is", "in videos: # Get the video item to process. video_item", "type of content. xbmcplugin.setContent(_handle, 'videos') # Get video categories categories", "the name # of the current section. xbmcplugin.setPluginCategory(_handle, 'ACI') #", "<https://wiki.python.org/moin/Generators>`_ instead of returning lists. :param category: Category name :type", "plugin you need to set each image accordingly. list_item.setArt({'thumb': video_item[\"thumbnail\"],", "this type of content. xbmcplugin.setContent(_handle, 'videos') # Get the list", "elif params['action'] == 'play': # Play a video from a", "each video. for video_id in videos: # Get the video", "videos[video_id] # Create a list item with a text label", "True, listitem=play_item) def router(paramstring): \"\"\" Router function that calls other", "virtual folder. xbmcplugin.endOfDirectory(_handle) def list_videos(category): \"\"\" Create the list of", "Create the list of video categories in the Kodi interface.", "folder. xbmcplugin.endOfDirectory(_handle) def play_video(path): \"\"\" Play a video by the", "# Set plugin content. It allows Kodi to select appropriate", "category name for both properties for for simplicity's sake. 
#", "# If the provided paramstring does not contain a supported", "xbmcgui.ListItem(label=category.title()) # Set graphics (thumbnail, fanart, banner, poster, landscape etc.)", "# Create a list item with a text label and", "def list_videos(category): \"\"\" Create the list of playable videos in", "the router function and pass the plugin call parameters to", "xbmcplugin.endOfDirectory(_handle) def play_video(path): \"\"\" Play a video by the provided", "the list of videos in the category :rtype: list \"\"\"", "banner, poster, landscape etc.) for the list item. # Here", "videos in a provided category. list_videos(params['category']) elif params['action'] == 'play':", "contain a supported action # we raise an exception. This", "If the plugin is called from Kodi UI without any", "'Firefox/47.0 FirePHP/0.7.4', 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32' }) def get_url(**kwargs): \"\"\" Create a", "# In a real-life plugin you need to set each", "str :return: the list of videos in the category :rtype:", "agent headers for video. user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT", "listing. xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder) # Add a sort method", "the Kodi interface. \"\"\" # Set plugin category. It is", "'&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \\ # '&amp;|User-Agent=Mozilla/5.0 (Windows NT 10.0;", "video_item['url'] += '|%s&amp;%s' % (user_agent_headers, referer_header) # Create a URL", "Here we use the same image for all items for", "= dict(urlparse.parse_qsl(paramstring)) # Check the parameters passed to the plugin", "open any sub-list. is_folder = False # Add our item", "simplicity's sake. # In a real-life plugin you need to", "player. xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item) def router(paramstring): \"\"\" Router function that", "Add the list item to a virtual Kodi folder. #", "trim the leading '?' 
from the plugin call paramstring router(sys.argv[2][1:])", "+ \\ # '&amp;|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101", "with inputstream addon. play_item.setProperty('inputstreamaddon', 'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type', 'hls') # Pass the", "category: str :return: the list of videos in the category", "video=video_item['url']) # video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \\ #", "# Get the list of videos in the category. videos", "category. videos = get_videos(category) # Iterate through each video. for", "thumbnail image. list_item = xbmcgui.ListItem(label=category.title()) # Set graphics (thumbnail, fanart,", "creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def play_video(path): \"\"\" Play a", "url = get_url(action='play', video=video_url) # Add the list item to", "# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for", "item with a text label and a thumbnail image. list_item", "Get the list of video files/streams. Here you can insert", "Iterate through each video. for video_id in videos: # Get", "thumbnail image. list_item = xbmcgui.ListItem(label=video_item[\"title\"]) # Set additional info for", "get_url(action='play', video=video_item['url']) # video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \\", "# e.g. typos in action names. raise ValueError('Invalid paramstring: {0}!'.format(paramstring))", "call parameters to it. # We use string slicing to", "set each image accordingly. list_item.setArt({'thumb': video_item[\"thumbnail\"], 'icon': video_item[\"thumbnail\"], 'fanart': video_item[\"thumbnail\"]", "params['category'] == \"shows\": ATV.update_aci_shows() print(\"Updated from main shows.\") elif params['category']", "the list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.)", "Kodi virtual folder listing. 
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder) # Add", "list_item.setInfo('video', {'title': category.title(), 'genre': category.title(), 'mediatype': 'video'}) # Create a", "# Example: plugin://plugin.video.example/?action=play& # video=[video url] url = get_url(action='play', video=video_item['url'])", "to 'true'. # This is mandatory for playable items! list_item.setProperty('IsPlayable',", "video categories list_categories() if __name__ == '__main__': # Call the", "# setInfo allows to set various information for an item.", "for playable items! list_item.setProperty('IsPlayable', 'true') referer_header = urllib.urlencode({\"Referer\": video_item[\"location\"]}) video_item['url']", "parameters to it. # We use string slicing to trim", "list_categories(): \"\"\" Create the list of video categories in the", "from main shows.\") elif params['category'] == \"cable\": ATV.update_aci_cable() print(\"Updated from", "list of videos in a provided category. list_videos(params['category']) elif params['action']", "call. # Example: plugin://plugin.video.example/?action=listing&category=[category name] url = get_url(action=\"listing\", category=category) #", "note:: Consider using `generators functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists.", "URL :rtype: str \"\"\" return '{0}?{1}'.format(_url, urllib.urlencode(kwargs)) def get_categories(): \"\"\"", "insert some parsing code that retrieves the list of video", "mandatory for playable items! list_item.setProperty('IsPlayable', 'true') referer_header = urllib.urlencode({\"Referer\": video_item[\"location\"]})", "plugin call parameters to it. # We use string slicing", "\"shows\": ATV.update_aci_shows() print(\"Updated from main shows.\") elif params['category'] == \"cable\":", "sub-list of lower level items. is_folder = True # Add", "and a thumbnail image. list_item = xbmcgui.ListItem(label=video_item[\"title\"]) # Set additional", "ListItem correctly. 
list_item.setInfo('video', {'title': category.title(), 'genre': category.title(), 'mediatype': 'video'}) #", "# Here we use the same image for all items", "using `generator functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. :return: The", "{'title': video_item[\"title\"], 'genre': category.title(), 'mediatype': 'video'}) # Set graphics (thumbnail,", "lists. :param category: Category name :type category: str :return: the", "video=[video url] url = get_url(action='play', video=video_item['url']) # video_url = 'plugin://plugin.video.f4mTester/?url='", "This helps to catch coding errors, # e.g. typos in", "categories for category in categories: # xbmc.log(category.encode(\"utf-8\"), xbmc.LOGNOTICE) # Create", "\"\"\" return ATV.aci[category] def list_categories(): \"\"\" Create the list of", "# Get the video item to process. video_item = videos[video_id]", "to select appropriate views # for this type of content.", "the Kodi player. xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item) def router(paramstring): \"\"\" Router", "keyword arguments. :param kwargs: \"argument=value\" pairs :type kwargs: dict :return:", "\"argument=value\" pairs :type kwargs: dict :return: plugin call URL :rtype:", "using `generators functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. :param category:", "label and a thumbnail image. list_item = xbmcgui.ListItem(label=category.title()) # Set", "urllib.quote_plus(video['video']) + \\ # '&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \\ #", "\"\"\" # Set plugin category. It is displayed in some", "is_folder = True # Add our item to the Kodi", ":param path: Fully-qualified video URL :type path: str \"\"\" #", "a URL-encoded paramstring to the dictionary of # {<parameter>: <value>}", "video_item[\"title\"], 'genre': category.title(), 'mediatype': 'video'}) # Set graphics (thumbnail, fanart,", "this type of content. 
xbmcplugin.setContent(_handle, 'videos') # Get video categories", "is displayed in some skins as the name # of", "is needed for skin to display info for this ListItem", "= get_categories() # Iterate through categories for category in categories:", "# '&amp;|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 '", "list item to a virtual Kodi folder. # is_folder =", "== \"shows\": ATV.update_aci_shows() print(\"Updated from main shows.\") elif params['category'] ==", "shows.\") elif params['category'] == \"cable\": ATV.update_aci_cable() print(\"Updated from main cable.\")", "from main movies.\") # Display the list of videos in", "in categories: # xbmc.log(category.encode(\"utf-8\"), xbmc.LOGNOTICE) # Create a list item", "needed for skin to display info for this ListItem correctly.", "that this item opens a sub-list of lower level items.", "NT 10.0; WOW64; rv:47.0) Gecko/20100101 ' 'Firefox/47.0 FirePHP/0.7.4', 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32'", "additional info for the list item. # Here we use", "xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder) # Add a sort method for", "the list item. # Here we use the same image", "xbmcplugin.setContent(_handle, 'videos') # Get the list of videos in the", "list_item.setProperty('IsPlayable', 'true') referer_header = urllib.urlencode({\"Referer\": video_item[\"location\"]}) video_item['url'] += '|%s&amp;%s' %", "list of video streams in the given category from some", "returning lists. :param category: Category name :type category: str :return:", "category. It is displayed in some skins as the name", "videos: # Get the video item to process. video_item =", "video by the provided path. :param path: Fully-qualified video URL", "as the name # of the current section. xbmcplugin.setPluginCategory(_handle, category)", "Display the list of videos in a provided category. 
list_videos(params['category'])", "# import xbmc import xbmcgui import xbmcplugin import aci #", "skins as the name # of the current section. xbmcplugin.setPluginCategory(_handle,", "'fanart': video_item[\"thumbnail\"] }) # Set 'IsPlayable' property to 'true'. #", "sort method for the virtual folder items (alphabetically, ignore articles)", "the category. videos = get_videos(category) # Iterate through each video.", "ACI. ATV = aci.ACI() ATV.load_aci() # Encode user agent headers", "Kodi interface. \"\"\" # Set plugin category. It is displayed", ":rtype: str \"\"\" return '{0}?{1}'.format(_url, urllib.urlencode(kwargs)) def get_categories(): \"\"\" Get", "get_videos(category): \"\"\" Get the list of video files/streams. Here you", "True # Add our item to the Kodi virtual folder", "server. .. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_ instead of", "is mandatory for playable items! list_item.setProperty('IsPlayable', 'true') referer_header = urllib.urlencode({\"Referer\":", "lists. :return: The list of video categories :rtype: types.GeneratorType \"\"\"", "list_item = xbmcgui.ListItem(label=category.title()) # Set graphics (thumbnail, fanart, banner, poster,", "pass the plugin call parameters to it. # We use", "paramstring does not contain a supported action # we raise", "videos in the Kodi interface. :param category: Category name :type", "for skin to display info for this ListItem correctly. list_item.setInfo('video',", "int(sys.argv[1]) # Get an instance of ACI. ATV = aci.ACI()", "== \"cable\": ATV.update_aci_cable() print(\"Updated from main cable.\") elif params['category'] ==", "video. user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0)", ":rtype: types.GeneratorType \"\"\" return ATV.aci.iterkeys() def get_videos(category): \"\"\" Get the", "'play': # Play a video from a provided URL. play_video(params['video'])", "item won't open any sub-list. 
is_folder = False # Add", "use string slicing to trim the leading '?' from the", "UI without any parameters, # display the list of video", "in the category :rtype: list \"\"\" return ATV.aci[category] def list_categories():", "videos in the category :rtype: list \"\"\" return ATV.aci[category] def", "is needed for a skin to display info for this", "list of videos in the category. videos = get_videos(category) #", "list item with a text label and a thumbnail image.", ":param paramstring: URL encoded plugin paramstring :type paramstring: str \"\"\"", "of video categories :rtype: types.GeneratorType \"\"\" return ATV.aci.iterkeys() def get_videos(category):", ":rtype: list \"\"\" return ATV.aci[category] def list_categories(): \"\"\" Create the", "types.GeneratorType \"\"\" return ATV.aci.iterkeys() def get_videos(category): \"\"\" Get the list", "given category from some site or server. .. note:: Consider", "'fanart': \"icon.png\"}) # Set additional info for the list item.", "video_item[\"thumbnail\"], 'fanart': video_item[\"thumbnail\"] }) # Set 'IsPlayable' property to 'true'.", "integer number. _handle = int(sys.argv[1]) # Get an instance of", "path. :param path: Fully-qualified video URL :type path: str \"\"\"", "video categories categories = get_categories() # Iterate through categories for", "For available properties see the following link: # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 #", "Get the plugin handle as an integer number. _handle =", "provided path. :param path: Fully-qualified video URL :type path: str", "WOW64; rv:47.0) Gecko/20100101 ' 'Firefox/47.0 FirePHP/0.7.4', 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32' }) def", "the plugin if params: if params['action'] == 'listing': # Load", "a sort method for the virtual folder items (alphabetically, ignore", "ListItem correctly. 
list_item.setInfo('video', {'title': video_item[\"title\"], 'genre': category.title(), 'mediatype': 'video'}) #", "exception. This helps to catch coding errors, # e.g. typos", "sys.argv[0] # Get the plugin handle as an integer number.", "params = dict(urlparse.parse_qsl(paramstring)) # Check the parameters passed to the", "by the provided path. :param path: Fully-qualified video URL :type", "allows Kodi to select appropriate views # for this type", "can insert some parsing code that retrieves the list of", "setInfo allows to set various information for an item. #", "for category in categories: # xbmc.log(category.encode(\"utf-8\"), xbmc.LOGNOTICE) # Create a", "}) # Set 'IsPlayable' property to 'true'. # This is", "in action names. raise ValueError('Invalid paramstring: {0}!'.format(paramstring)) else: # Load", "# Get an instance of ACI. ATV = aci.ACI() ATV.load_aci()", "list_item, is_folder) # Add a sort method for the virtual", "for for simplicity's sake. # setInfo allows to set various", "arguments. :param kwargs: \"argument=value\" pairs :type kwargs: dict :return: plugin", "item opens a sub-list of lower level items. is_folder =", "of videos in the category. videos = get_videos(category) # Iterate", "we raise an exception. This helps to catch coding errors,", "'mediatype' is needed for a skin to display info for", "the plugin handle as an integer number. _handle = int(sys.argv[1])", ":type category: str \"\"\" # Set plugin category. It is", "interface. \"\"\" # Set plugin category. It is displayed in", "\"\"\" # Parse a URL-encoded paramstring to the dictionary of", "'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \\ # '&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) +", "plugin://plugin.video.example/?action=listing&category=[category name] url = get_url(action=\"listing\", category=category) # is_folder = True", "poster, landscape etc.) for the list item. # Here we", "list item. 
# Here we use the same image for", "of returning lists. :param category: Category name :type category: str", "this ListItem correctly. list_item.setInfo('video', {'title': category.title(), 'genre': category.title(), 'mediatype': 'video'})", "\"\"\" # Create a playable item with a path to", "inputstream addon. play_item.setProperty('inputstreamaddon', 'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type', 'hls') # Pass the item", "of the current section. xbmcplugin.setPluginCategory(_handle, category) # Set plugin content.", "functions depending on the provided paramstring :param paramstring: URL encoded", "or server. .. note:: Consider using `generators functions <https://wiki.python.org/moin/Generators>`_ instead", "ATV.update_aci_cable() print(\"Updated from main cable.\") elif params['category'] == \"movies\": ATV.update_aci_movies()", "' 'Firefox/47.0 FirePHP/0.7.4', 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32' }) def get_url(**kwargs): \"\"\" Create", "categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.) from some site or", "a list item with a text label and a thumbnail", "plugin category. It is displayed in some skins as the", "a video by the provided path. :param path: Fully-qualified video", "virtual Kodi folder. # is_folder = False means that this", "for simplicity's sake. 
# In a real-life plugin you need", "URL encoded plugin paramstring :type paramstring: str \"\"\" # Parse", "for a skin to display info for this ListItem correctly.", "folder items (alphabetically, ignore articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating", "category.title(), 'mediatype': 'video'}) # Set graphics (thumbnail, fanart, banner, poster,", "# video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \\ # '&amp;streamtype=HLSRETRY&name='", "# display the list of video categories list_categories() if __name__", "import sys import urllib import urlparse # import xbmc import", "# Check the parameters passed to the plugin if params:", "referer_header = urllib.urlencode({\"Referer\": video_item[\"location\"]}) video_item['url'] += '|%s&amp;%s' % (user_agent_headers, referer_header)", "plugin content. It allows Kodi to select appropriate views #", "for an item. # For available properties see the following", "the videos for aci. if params['category'] == \"shows\": ATV.update_aci_shows() print(\"Updated", "slicing to trim the leading '?' from the plugin call", "(alphabetically, ignore articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating a virtual", "Call the router function and pass the plugin call parameters", "# Display the list of videos in a provided category.", "need to set each image accordingly. list_item.setArt({'thumb': video_item[\"thumbnail\"], 'icon': video_item[\"thumbnail\"],", "parameters passed to the plugin if params: if params['action'] ==", "a provided URL. play_video(params['video']) else: # If the provided paramstring", "for video_id in videos: # Get the video item to", "main shows.\") elif params['category'] == \"cable\": ATV.update_aci_cable() print(\"Updated from main", "items! 
list_item.setProperty('IsPlayable', 'true') referer_header = urllib.urlencode({\"Referer\": video_item[\"location\"]}) video_item['url'] += '|%s&amp;%s'", "Add a sort method for the virtual folder items (alphabetically,", "each image accordingly. list_item.setArt({'thumb': \"icon.png\", 'icon': \"icon.png\", 'fanart': \"icon.png\"}) #", "opens a sub-list of lower level items. is_folder = True", "elif params['category'] == \"cable\": ATV.update_aci_cable() print(\"Updated from main cable.\") elif", "Load the videos for aci. if params['category'] == \"shows\": ATV.update_aci_shows()", "typos in action names. raise ValueError('Invalid paramstring: {0}!'.format(paramstring)) else: #", "# We use string slicing to trim the leading '?'", "= True means that this item opens a sub-list of", "def play_video(path): \"\"\" Play a video by the provided path.", "'ACI') # Set plugin content. It allows Kodi to select", "graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list", "same image for all items for simplicity's sake. # In", "This is mandatory for playable items! list_item.setProperty('IsPlayable', 'true') referer_header =", "Get an instance of ACI. ATV = aci.ACI() ATV.load_aci() #", "Get the list of video categories. Here you can insert", "info for the list item. # Here we use a", "str \"\"\" return '{0}?{1}'.format(_url, urllib.urlencode(kwargs)) def get_categories(): \"\"\" Get the", "instead of returning lists. :return: The list of video categories", "a text label and a thumbnail image. list_item = xbmcgui.ListItem(label=category.title())", "item. # Here we use a category name for both", "Get the video item to process. video_item = videos[video_id] #", "instead of returning lists. :param category: Category name :type category:", "# urllib.quote_plus(video['reference']) # url = get_url(action='play', video=video_url) # Add the", "ATV.aci[category] def list_categories(): \"\"\" Create the list of video categories", "Set 'IsPlayable' property to 'true'. 
# This is mandatory for", "'listing': # Load the videos for aci. if params['category'] ==", "Kodi UI without any parameters, # display the list of", "items. is_folder = True # Add our item to the", "# Get the plugin handle as an integer number. _handle", "of videos in a provided category. list_videos(params['category']) elif params['action'] ==", "errors, # e.g. typos in action names. raise ValueError('Invalid paramstring:", "paramstring to the dictionary of # {<parameter>: <value>} elements params", "\"icon.png\", 'fanart': \"icon.png\"}) # Set additional info for the list", "xbmcgui.ListItem(label=video_item[\"title\"]) # Set additional info for the list item. #", "on the provided paramstring :param paramstring: URL encoded plugin paramstring", "a URL for a plugin recursive call. # Example: plugin://plugin.video.example/?action=listing&category=[category", "items for simplicity's sake. # In a real-life plugin you", "creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def list_videos(category): \"\"\" Create the", "this item opens a sub-list of lower level items. is_folder", "and a thumbnail image. list_item = xbmcgui.ListItem(label=category.title()) # Set graphics", "# '&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \\ # '&amp;|User-Agent=Mozilla/5.0 (Windows NT", "set various information for an item. # For available properties", "Firefox/47.0 ' \\ # 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \\ # urllib.quote_plus(video['reference']) #", "categories in the Kodi interface. \"\"\" # Set plugin category.", "the current section. xbmcplugin.setPluginCategory(_handle, category) # Set plugin content. It", "video_item[\"thumbnail\"], 'icon': video_item[\"thumbnail\"], 'fanart': video_item[\"thumbnail\"] }) # Set 'IsPlayable' property", "function and pass the plugin call parameters to it. 
#", "10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \\ # 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' +", "the provided path. :param path: Fully-qualified video URL :type path:", "from some site or server. .. note:: Consider using `generators", "select appropriate views # for this type of content. xbmcplugin.setContent(_handle,", "categories :rtype: types.GeneratorType \"\"\" return ATV.aci.iterkeys() def get_videos(category): \"\"\" Get", "a plugin recursive call. # Example: plugin://plugin.video.example/?action=listing&category=[category name] url =", "following link: # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 # 'mediatype' is needed for a", "# Parse a URL-encoded paramstring to the dictionary of #", "coding errors, # e.g. typos in action names. raise ValueError('Invalid", "category: str \"\"\" # Set plugin category. It is displayed", "you can insert some parsing code that retrieves the list", "params['action'] == 'play': # Play a video from a provided", "the list of video categories in the Kodi interface. \"\"\"", "xbmcplugin import aci # Get the plugin url in plugin://", "is_folder) # Add a sort method for the virtual folder", "}) def get_url(**kwargs): \"\"\" Create a URL for calling the", "for a plugin recursive call. # Example: plugin://plugin.video.example/?action=play& # video=[video", "a video from a provided URL. play_video(params['video']) else: # If", "info for this ListItem correctly. list_item.setInfo('video', {'title': video_item[\"title\"], 'genre': category.title(),", "to the Kodi virtual folder listing. xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)", "urllib.quote_plus(video['reference']) # url = get_url(action='play', video=video_url) # Add the list", "the Kodi virtual folder listing. 
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder) #", "+ urllib.quote_plus(video['video']) + \\ # '&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \\", "params: if params['action'] == 'listing': # Load the videos for", "get_url(**kwargs): \"\"\" Create a URL for calling the plugin recursively", "list_categories() if __name__ == '__main__': # Call the router function", "def get_url(**kwargs): \"\"\" Create a URL for calling the plugin", "else: # Load ATV. ATV.load_aci() # If the plugin is", "of videos in the category :rtype: list \"\"\" return ATV.aci[category]", "URL :type path: str \"\"\" # Create a playable item", "function that calls other functions depending on the provided paramstring", "fanart, banner, poster, landscape etc.) for the list item. #", "<https://wiki.python.org/moin/Generators>`_ instead of returning lists. :return: The list of video", "Consider using `generators functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. :param", "displayed in some skins as the name # of the", "any sub-list. is_folder = False # Add our item to", "\"cable\": ATV.update_aci_cable() print(\"Updated from main cable.\") elif params['category'] == \"movies\":", "video_item[\"location\"]}) video_item['url'] += '|%s&amp;%s' % (user_agent_headers, referer_header) # Create a", "in plugin:// notation. _url = sys.argv[0] # Get the plugin", "needed for a skin to display info for this ListItem", "get_videos(category) # Iterate through each video. for video_id in videos:", "of # {<parameter>: <value>} elements params = dict(urlparse.parse_qsl(paramstring)) # Check", "means that this item won't open any sub-list. 
is_folder =", "_url = sys.argv[0] # Get the plugin handle as an", "urllib.urlencode(kwargs)) def get_categories(): \"\"\" Get the list of video categories.", "categories = get_categories() # Iterate through categories for category in", "(Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \\ #", "to trim the leading '?' from the plugin call paramstring", "for both properties for for simplicity's sake. # setInfo allows", "the item to the Kodi player. xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item) def", ":param kwargs: \"argument=value\" pairs :type kwargs: dict :return: plugin call", "'ShockwaveFlash/2172.16.17.32' }) def get_url(**kwargs): \"\"\" Create a URL for calling", "# Play a video from a provided URL. play_video(params['video']) else:", "the plugin is called from Kodi UI without any parameters,", "the list of playable videos in the Kodi interface. :param", "= urllib.urlencode({\"Referer\": video_item[\"location\"]}) video_item['url'] += '|%s&amp;%s' % (user_agent_headers, referer_header) #", "call. # Example: plugin://plugin.video.example/?action=play& # video=[video url] url = get_url(action='play',", "<value>} elements params = dict(urlparse.parse_qsl(paramstring)) # Check the parameters passed", "the list item. # 'mediatype' is needed for skin to", "10.0; WOW64; rv:47.0) Gecko/20100101 ' 'Firefox/47.0 FirePHP/0.7.4', 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32' })", "list \"\"\" return ATV.aci[category] def list_categories(): \"\"\" Create the list", "available properties see the following link: # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 # 'mediatype'", "'true') referer_header = urllib.urlencode({\"Referer\": video_item[\"location\"]}) video_item['url'] += '|%s&amp;%s' % (user_agent_headers,", "# of the current section. xbmcplugin.setPluginCategory(_handle, 'ACI') # Set plugin", "def get_videos(category): \"\"\" Get the list of video files/streams. 
Here", "list_item.setArt({'thumb': \"icon.png\", 'icon': \"icon.png\", 'fanart': \"icon.png\"}) # Set additional info", "= get_videos(category) # Iterate through each video. for video_id in", "to set each image accordingly. list_item.setArt({'thumb': \"icon.png\", 'icon': \"icon.png\", 'fanart':", "the dictionary of # {<parameter>: <value>} elements params = dict(urlparse.parse_qsl(paramstring))", "Fully-qualified video URL :type path: str \"\"\" # Create a", "cable.\") elif params['category'] == \"movies\": ATV.update_aci_movies() print(\"Updated from main movies.\")", "urllib import urlparse # import xbmc import xbmcgui import xbmcplugin", "method for the virtual folder items (alphabetically, ignore articles) xbmcplugin.addSortMethod(_handle,", "ATV. ATV.load_aci() # If the plugin is called from Kodi", "text label and a thumbnail image. list_item = xbmcgui.ListItem(label=video_item[\"title\"]) #", "url in plugin:// notation. _url = sys.argv[0] # Get the", "'mediatype': 'video'}) # Set graphics (thumbnail, fanart, banner, poster, landscape", "that retrieves the list of video categories (e.g. 'Movies', 'TV-shows',", "instance of ACI. ATV = aci.ACI() ATV.load_aci() # Encode user", "to the dictionary of # {<parameter>: <value>} elements params =", "recursive call. # Example: plugin://plugin.video.example/?action=listing&category=[category name] url = get_url(action=\"listing\", category=category)", "allows to set various information for an item. # For", "# Get video categories categories = get_categories() # Iterate through", "__name__ == '__main__': # Call the router function and pass", "item to the Kodi player. xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item) def router(paramstring):", "of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.) from some", "label and a thumbnail image. 
list_item = xbmcgui.ListItem(label=video_item[\"title\"]) # Set", "return ATV.aci[category] def list_categories(): \"\"\" Create the list of video", "# Iterate through each video. for video_id in videos: #", "kwargs: \"argument=value\" pairs :type kwargs: dict :return: plugin call URL", "url = get_url(action='play', video=video_item['url']) # video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video'])", "action names. raise ValueError('Invalid paramstring: {0}!'.format(paramstring)) else: # Load ATV.", "simplicity's sake. # setInfo allows to set various information for", "this ListItem correctly. list_item.setInfo('video', {'title': video_item[\"title\"], 'genre': category.title(), 'mediatype': 'video'})", "for calling the plugin recursively from the given set of", "code that retrieves the list of video categories (e.g. 'Movies',", "list_videos(category): \"\"\" Create the list of playable videos in the", "import urlparse # import xbmc import xbmcgui import xbmcplugin import", "FirePHP/0.7.4', 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32' }) def get_url(**kwargs): \"\"\" Create a URL", ":param category: Category name :type category: str :return: the list", "get_url(action=\"listing\", category=category) # is_folder = True means that this item", "a virtual folder. xbmcplugin.endOfDirectory(_handle) def play_video(path): \"\"\" Play a video", "Create a URL for calling the plugin recursively from the", "# Load the videos for aci. if params['category'] == \"shows\":", "list item. # 'mediatype' is needed for skin to display", "True means that this item opens a sub-list of lower", "or server. .. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_ instead", "in the Kodi interface. \"\"\" # Set plugin category. It", "Get the list of videos in the category. 
videos =", "# Iterate through categories for category in categories: # xbmc.log(category.encode(\"utf-8\"),", "to display info for this ListItem correctly. list_item.setInfo('video', {'title': video_item[\"title\"],", "'video'}) # Set graphics (thumbnail, fanart, banner, poster, landscape etc.)", "action # we raise an exception. This helps to catch", "Consider using `generator functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. :return:", "'genre': category.title(), 'mediatype': 'video'}) # Create a URL for a", "= get_url(action='play', video=video_item['url']) # video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) +", "Create a list item with a text label and a", "Kodi interface. :param category: Category name :type category: str \"\"\"", "dict :return: plugin call URL :rtype: str \"\"\" return '{0}?{1}'.format(_url,", "(e.g. 'Movies', 'TV-shows', 'Documentaries' etc.) from some site or server.", "for the list item. # Here we use a category", "videos in the category. videos = get_videos(category) # Iterate through", "elements params = dict(urlparse.parse_qsl(paramstring)) # Check the parameters passed to", "handle as an integer number. _handle = int(sys.argv[1]) # Get", "xbmcplugin.setPluginCategory(_handle, category) # Set plugin content. It allows Kodi to", "that retrieves the list of video streams in the given", "# 'mediatype' is needed for a skin to display info", "link: # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 # 'mediatype' is needed for a skin", "for this ListItem correctly. list_item.setInfo('video', {'title': category.title(), 'genre': category.title(), 'mediatype':", "an item. # For available properties see the following link:", "# Pass the item to the Kodi player. 
xbmcplugin.setResolvedUrl(_handle, True,", "\"\"\" Router function that calls other functions depending on the", "list of videos in the category :rtype: list \"\"\" return", "appropriate views # for this type of content. xbmcplugin.setContent(_handle, 'videos')", "etc.) for the list item. # Here we use the", "and pass the plugin call parameters to it. # We", "to process. video_item = videos[video_id] # Create a list item", "main movies.\") # Display the list of videos in a", "headers for video. user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0;", "# xbmc.log(category.encode(\"utf-8\"), xbmc.LOGNOTICE) # Create a list item with a", "video categories :rtype: types.GeneratorType \"\"\" return ATV.aci.iterkeys() def get_videos(category): \"\"\"", "categories categories = get_categories() # Iterate through categories for category", "urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 ' 'Firefox/47.0", "category. list_videos(params['category']) elif params['action'] == 'play': # Play a video", "The list of video categories :rtype: types.GeneratorType \"\"\" return ATV.aci.iterkeys()", "video_item[\"thumbnail\"] }) # Set 'IsPlayable' property to 'true'. # This", "a supported action # we raise an exception. This helps", "xbmc import xbmcgui import xbmcplugin import aci # Get the", "category :rtype: list \"\"\" return ATV.aci[category] def list_categories(): \"\"\" Create", "# {<parameter>: <value>} elements params = dict(urlparse.parse_qsl(paramstring)) # Check the", "Add our item to the Kodi virtual folder listing. xbmcplugin.addDirectoryItem(_handle,", "video item to process. video_item = videos[video_id] # Create a", "Set additional info for the list item. # 'mediatype' is", "= aci.ACI() ATV.load_aci() # Encode user agent headers for video.", ":type paramstring: str \"\"\" # Parse a URL-encoded paramstring to", "process. 
video_item = videos[video_id] # Create a list item with", "of lower level items. is_folder = True # Add our", "str \"\"\" # Set plugin category. It is displayed in", "video streams in the given category from some site or", "the provided paramstring :param paramstring: URL encoded plugin paramstring :type", "else: # If the provided paramstring does not contain a", "= 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \\ # '&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name'])", "an instance of ACI. ATV = aci.ACI() ATV.load_aci() # Encode", "= False means that this item won't open any sub-list.", "with a path to play. play_item = xbmcgui.ListItem(path=path) # Play", "video from a provided URL. play_video(params['video']) else: # If the", "== 'play': # Play a video from a provided URL.", "`generator functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. :return: The list", "Play with inputstream addon. play_item.setProperty('inputstreamaddon', 'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type', 'hls') # Pass", "sub-list. is_folder = False # Add our item to the", "paramstring :param paramstring: URL encoded plugin paramstring :type paramstring: str", "-*- import sys import urllib import urlparse # import xbmc", "plugin you need to set each image accordingly. list_item.setArt({'thumb': \"icon.png\",", "# for this type of content. xbmcplugin.setContent(_handle, 'videos') # Get", "Create the list of playable videos in the Kodi interface.", "+ urllib.quote_plus(video['name']) + \\ # '&amp;|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64;", "without any parameters, # display the list of video categories", "Kodi to select appropriate views # for this type of", "from the given set of keyword arguments. :param kwargs: \"argument=value\"", "a skin to display info for this ListItem correctly. 
list_item.setInfo('video',", "xbmcplugin.endOfDirectory(_handle) def list_videos(category): \"\"\" Create the list of playable videos", "ignore articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating a virtual folder.", "user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101", "path: str \"\"\" # Create a playable item with a", "import xbmc import xbmcgui import xbmcplugin import aci # Get", "some parsing code that retrieves the list of video categories", "Parse a URL-encoded paramstring to the dictionary of # {<parameter>:", "folder. xbmcplugin.endOfDirectory(_handle) def list_videos(category): \"\"\" Create the list of playable", "params['category'] == \"cable\": ATV.update_aci_cable() print(\"Updated from main cable.\") elif params['category']", "the plugin call parameters to it. # We use string", "skin to display info for this ListItem correctly. list_item.setInfo('video', {'title':", "the list of video streams in the given category from", "you need to set each image accordingly. list_item.setArt({'thumb': video_item[\"thumbnail\"], 'icon':", "Set additional info for the list item. # Here we", "play_video(path): \"\"\" Play a video by the provided path. :param", "'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 ' 'Firefox/47.0 FirePHP/0.7.4',", "# Get the plugin url in plugin:// notation. _url =", "sake. # setInfo allows to set various information for an", "for a plugin recursive call. # Example: plugin://plugin.video.example/?action=listing&category=[category name] url", "xbmcgui.ListItem(path=path) # Play with inputstream addon. 
play_item.setProperty('inputstreamaddon', 'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')", "'videos') # Get video categories categories = get_categories() # Iterate", "'{0}?{1}'.format(_url, urllib.urlencode(kwargs)) def get_categories(): \"\"\" Get the list of video", "movies.\") # Display the list of videos in a provided", "Play a video from a provided URL. play_video(params['video']) else: #", "== '__main__': # Call the router function and pass the", "Kodi folder. # is_folder = False means that this item", "item. # 'mediatype' is needed for skin to display info", "# video=[video url] url = get_url(action='play', video=video_item['url']) # video_url =", ":type path: str \"\"\" # Create a playable item with", "# Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def list_videos(category): \"\"\"", "given set of keyword arguments. :param kwargs: \"argument=value\" pairs :type", "videos = get_videos(category) # Iterate through each video. for video_id", "name # of the current section. xbmcplugin.setPluginCategory(_handle, 'ACI') # Set", "a playable item with a path to play. play_item =", "# 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \\ # urllib.quote_plus(video['reference']) # url = get_url(action='play',", "recursive call. # Example: plugin://plugin.video.example/?action=play& # video=[video url] url =", "a virtual folder. xbmcplugin.endOfDirectory(_handle) def list_videos(category): \"\"\" Create the list", "== 'listing': # Load the videos for aci. if params['category']", "plugin if params: if params['action'] == 'listing': # Load the", "'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type', 'hls') # Pass the item to the Kodi", "if params['action'] == 'listing': # Load the videos for aci.", "the list item. 
# Here we use a category name", "of video categories list_categories() if __name__ == '__main__': # Call", "the list of video categories. Here you can insert some", "play_video(params['video']) else: # If the provided paramstring does not contain", "current section. xbmcplugin.setPluginCategory(_handle, category) # Set plugin content. It allows", "of returning lists. :return: The list of video categories :rtype:", "call URL :rtype: str \"\"\" return '{0}?{1}'.format(_url, urllib.urlencode(kwargs)) def get_categories():", "# Set additional info for the list item. # 'mediatype'", "an integer number. _handle = int(sys.argv[1]) # Get an instance", "It allows Kodi to select appropriate views # for this", "parameters, # display the list of video categories list_categories() if", "for the list item. # Here we use the same", "= videos[video_id] # Create a list item with a text", "list_item.setArt({'thumb': video_item[\"thumbnail\"], 'icon': video_item[\"thumbnail\"], 'fanart': video_item[\"thumbnail\"] }) # Set 'IsPlayable'", "# Here we use a category name for both properties", "# -*- coding: utf-8 -*- import sys import urllib import", "additional info for the list item. # 'mediatype' is needed", "Router function that calls other functions depending on the provided" ]
[ "Handle the cases where placeholder is output. There is a", "output. There is a case where the program is like", "idx, inp in enumerate(inputs): # We set the default image", "newly created for tf_name. \"\"\" if tf_name in self.context: #", "# 'Placeholder_1'] # node name: while_0 op type: while inputs:", "else [outputs] outputs = [x if isinstance(x, str) else x.name", "unpack a python tuple of Vars # ('while_0:0', 'while_0:1') returned", "if x.name in tf_placeholder_names] # We fill in shapes for", "in self.context class TFConverter: def __init__(self, tfssa, inputs=None, outputs=None, **kwargs):", "= tf_placeholder_names[0] # filter out those inputs which is not", "\" Warning: Node added to context must have the same", "needed when the last op doesn't # generate a new", "instead.\".format( [type(i) for i in inputs] ) ) # Special", "does not match TensorFlow's node name ({}).\" \" Warning: Node", "outputs) outputs = main_func.outputs if outputs is None else outputs", "output to Placeholder:0 by inserting an identity \"\"\" block =", "name or input name was not provided\" ) if inp.name", "with block: name_counts = {} new_outputs = [output for output", "last op doesn't # generate a new Var (e.g., get_tuple,", "the default image format in TF as NHWC, since NHWC", "def _get_stack(self, tfssa, root=\"main\"): # We're trying to get a", "'while_0:1') returned from while_0 SSA op. We need to #", "loop through the graphs. # This is NOT necessarily a", "} -> (%bool_var1) # body_block1(%a.x, %b.x) { # %ret_axx =", "placeholder is output. 
There is a case where the program", "# rename `while_0:0` to `while/Exit` in order for users to", "+ all_nodes: raise KeyError('Output node name \"{}\" does exist.'.format(n)) def", "is None: inputs[0].name = tf_placeholder_names[0] # filter out those inputs", "source code is governed by a BSD-3-clause license that can", "op type: get_tuple inputs: ['while_0'] # node name: while/Exit_1 op", "# body_block1(%a.x, %b.x) { # %ret_axx = while_loop(loop_vars=(%a.x,)) # cond_block2(%a.x.x)", "generate a new Var (e.g., get_tuple, Identity etc.), and thus", "= v_o.name + \"_duplicate_\" + str(name_counts[v_o.name]) x = mb.identity(x=v_o, name=new_name)", "inputtype.shape.shape] node.attr[\"_output_shapes\"] = [shape] # list of length 1 #", "convert_graph from coremltools.converters.mil.mil import Builder as mb from coremltools.converters.mil.mil import", "tfssa.functions[\"main\"] graph = main_func.graph # Filter the inputs to only", "raise ValueError(error_message) return shape def _get_stack(self, tfssa, root=\"main\"): # We're", "tuple of TensorType or ImageType, got {} instead.\".format( [type(i) for", "functions are translated to nested # blocks in Program, like", "if isinstance(tensor, str): ret = tensor else: ret = tensor.name", "for output, output_name in zip(block.outputs, outputs_name): if output.name not in", "def get_graph(self, graph_name): if graph_name not in self.graphs: msg =", "name. 
# # Example: # # TF code: # x", ") from coremltools.converters.mil.input_types import Shape as InputShape from coremltools.converters.mil.mil.var import", "dtype = node.attr['dtype'] shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) shape = [get_new_symbol()", "while_loop(loop_vars=(%a, %b)) # cond_block1(%a.x, %b.x) { # ...some ops #", "inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])\".format(name, name) if tfgraph[name].attr.get(\"shape\", None) is not None:", "from .convert_utils import convert_graph from coremltools.converters.mil.mil import Builder as mb", "list of length 1 # infer outputs if not provided", "inputs]): raise ValueError( \"Type of inputs should be list or", "outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs] output_nodes", "the last op doesn't # generate a new Var (e.g.,", "be unstacked once cond_block2 is done. self.func_input_stack = [] #", "self.context[tf_name] = ssa_vars def add_graph(self, graph_name, graph): self.graphs[graph_name] = graph", "an identity \"\"\" block = prog[\"main\"] input_name = [x.name for", "map of tf_node.name --> ssa_var available # to the current", "(tuple, list)) else [outputs] output_nodes = [] for f in", "which in our translation simply unpack a python tuple of", "graph_stack @staticmethod def _get_tensor_name(tensor): ret = None if isinstance(tensor, str):", "body twice (which is # needed to figure out shapes", "class TranscriptionContext: def __init__(self, name=None): self.name = name if name", "to context.\" ) raise ValueError(msg.format(tf_name, ssa_vars.name)) self.context[tf_name] = ssa_vars def", "graph = self.tfssa.functions[\"main\"].graph for g_name in self.graph_stack[1:]: self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) self.convert_main_graph(prog,", "= self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) shape = [get_new_symbol() if s is None", "prog[\"main\"] input_name = [x.name for x in 
list(block.inputs.values())] with block:", "(%ret_axx) # ....some ops using %ret_a # } -> (%ret_ax,", "as the name passed to context.\" ) raise ValueError(msg.format(tf_name, ssa_vars.name))", "# TF loops are represented as functions, so nested loops", "cond_block2, we'd have func_input_stack # # (%a.x.x,) # (%a.x, %b.x)", "unstacked once cond_block2 is done. self.func_input_stack = [] # list", "name: Placeholder op type: Placeholder inputs: [] # node name:", "out_name and v_o.name not in input_names: logging.info( \"Renaming output var:", "to None. outputs: list of str or str, optional, defaults", "else: inputs = [] placeholder_names = tf_placeholder_names # name ->", "is None: raise ValueError( \"Unable to infer input's name or", "outputs) or Var (single_output) is_new_var: True if ssa_vars are newly", "to nested # blocks in Program, like # # while_loop(loop_vars=(%a,", "x in inputs if x.name in tf_placeholder_names] # We fill", "raise ValueError( \"Type of inputs should be list or tuple,", "outputs=None, **kwargs): \"\"\" tfssa: TensorFlow IR. 
inputs: list of TensorType", "False self.inputs = tuple(inputs) for inputtype in self.inputs: if not", "node name: while_0 op type: while inputs: ['make_input_0'] # node", ") if not all([isinstance(i, InputType) for i in inputs]): raise", "# where [%a.x.x] would be unstacked once cond_block2 is done.", "TranscriptionContext: def __init__(self, name=None): self.name = name if name is", "tfssa.functions[fname].graph.values(): func_x, func_y = None, None if node.op == \"while\":", "graph_name, graph): self.graphs[graph_name] = graph def get_graph(self, graph_name): if graph_name", "in func_inputs.keys(): self.context.add(name, ssa_func.inputs[name]) outputs = convert_graph(self.context, graph, self.outputs) ssa_func.set_outputs(outputs)", "available\") return self.func_input_stack[-1] def __getitem__(self, tf_name): if tf_name not in", "not in input_name or output.name == output_name: new_output = output", "need to # rename `while_0:0` to `while/Exit` in order for", "should be list or tuple of TensorType or ImageType, got", "We set the default image format in TF as NHWC,", "logging.info( \"Adding Input not specified by users: '{}'\".format( added_inputs) )", "node = graph[inp] dtype = node.attr['dtype'] shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp)", "# create mb.identity for those cases block = prog[\"main\"] with", "shape] inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) added_inputs[inp] = (shape, dtype) if len(added_inputs)", "get_tuple inputs: ['while_0'] # # Observe that return node `while/Exit`", "for idx, inp in enumerate(inputs): # We set the default", "like # # while_loop(loop_vars=(%a, %b)) # cond_block1(%a.x, %b.x) { #", "if len(added_inputs) > 0: logging.info( \"Adding Input not specified by", "for s in shape] inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) added_inputs[inp] = (shape,", "graph_name not in self.graphs: msg = \"Graph '{}' not found", ") if inp.name not in tf_placeholder_names: raise 
ValueError( \"Input ({})", "where [%a.x.x] would be unstacked once cond_block2 is done. self.func_input_stack", "= self.tfssa.functions[\"main\"].graph for g_name in self.graph_stack[1:]: self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) self.convert_main_graph(prog, graph)", "where the program is like main(%Placeholder: (5,fp32)) { block3() {", "while loop cond & body): # # node name: Placeholder", "self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) shape = [get_new_symbol() if s is None or", "tf_name. \"\"\" if tf_name in self.context: # Overriding allow us", "last TF op's name. # # Example: # # TF", "the converter will try to extract the output information from", "if func_x and fname not in dep[func_x]: dep[func_x].append(fname) if func_y", "inputs=None, outputs=None, **kwargs): \"\"\" tfssa: TensorFlow IR. inputs: list of", "in our translation simply unpack a python tuple of Vars", "v_o, out_name in zip(prog[\"main\"].outputs, self.outputs): if v_o.name != out_name and", "(%new_a.x.x) # } -> (%ret_axx) # ....some ops using %ret_a", "that users didn't specify. user_input_names = [inp.name for inp in", "-> (%ret_axx) # ....some ops using %ret_a # } ->", "node.attr['dtype'] shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) shape = [get_new_symbol() if s", "x in list(block.inputs.values())] with block: new_outputs = [] for output,", "a python tuple of Vars # ('while_0:0', 'while_0:1') returned from", "for v_o, out_name in zip(prog[\"main\"].outputs, self.outputs): if v_o.name != out_name", "mb.identity for those cases block = prog[\"main\"] with block: name_counts", "of Vars # ('while_0:0', 'while_0:1') returned from while_0 SSA op.", "output_nodes = [] for f in tfssa.functions.values(): output_nodes += list(f.outputs)", "self.graph_stack[0] != \"main\": msg = \"TF root graph must be", "for inp in inputs: # Check inputs existence if inp.name", "Program. 
These passes are different # from passes applied to", "shape for inp in inputs: # Check inputs existence if", "= {} for input_type in self.inputs: func_inputs[input_type.name] = mb.placeholder( input_type.shape.symbolic_shape,", "that we run conversion sequentially. self.graph_stack = self._get_stack(tfssa, root=\"main\") self.context", "TF frontend passes on Program. These passes are different #", "_get_shaping_class does not accept -1 or None dimension. shape =", "the output nodes or a str for single output name.", "x in outputs] self.outputs = outputs # We would like", "NOT necessarily a DAG. dep = {x: [] for x", "inputs if x.name in tf_placeholder_names] # We fill in shapes", "ValueError( \"Type of inputs should be list or tuple, got", "new_outputs.append(new_output) block.set_outputs(new_outputs) def convert_main_graph(self, prog, graph): func_inputs = {} for", "ValueError(\"No func input available\") self.func_input_stack.pop() def get_func_inputs(self): if len(self.func_input_stack) ==", "tensorflow_passes from coremltools.converters._profile_utils import _profile # TranscriptionContext maintains a map", "tf_name): if tf_name not in self.context: msg = \"TF var", "if inputs[0].name is None: inputs[0].name = tf_placeholder_names[0] # filter out", "input_type.shape.symbolic_shape, dtype=input_type.dtype) prog.set_main_input_types(self.inputs) with Function(func_inputs) as ssa_func: # Get the", "inputs[idx].channel_first = False self.inputs = tuple(inputs) for inputtype in self.inputs:", "as InputShape from coremltools.converters.mil.mil.var import Var from coremltools.converters.mil.mil import get_new_symbol", "var %s is added again. 
Overriding previous value\" logging.info(msg %", "added_inputs) ) for idx, inp in enumerate(inputs): # We set", "else: raise ValueError(error_message) return shape def _get_stack(self, tfssa, root=\"main\"): #", "== -1 else s \\ for s in shape] inputs.append(TensorType(name=inp,", "(which is # needed to figure out shapes changes during", "output is not Placeholder. input_names = [x.name for x in", "not isinstance(inputtype.shape, InputShape): continue if any([isinstance(s, RangeDim) for s in", "in graph if graph[n].op == \"Placeholder\"] placeholder_names = [] if", "tfssa.functions.values(): all_nodes += list(f.graph.keys()) for n in outputs: if self._get_tensor_name(n)", "tf.placeholder(tf.float32, shape=(1,)) # y = tf.placeholder(tf.float32, shape=(1,)) # c =", "inputs): self.func_input_stack.append(inputs) def unstack_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError(\"No", "new_outputs = [] for output, output_name in zip(block.outputs, outputs_name): if", "from coremltools.converters._profile_utils import _profile # TranscriptionContext maintains a map of", "Shape as InputShape from coremltools.converters.mil.mil.var import Var from coremltools.converters.mil.mil import", "We fill in shapes for user-specified input that doesn't have", "shape] inp.shape = _get_shaping_class(shape) # Extract placeholders that users didn't", "TensorFlow's node name ({}).\" \" Warning: Node added to context", "user_input_names = [inp.name for inp in inputs] for name in", ") for idx, inp in enumerate(inputs): # We set the", "name): error_message = \"Unable to determine the shape of input:", "--> ssa_var available # to the current TF --> tfssa", "is_new_var=True): \"\"\" ssa_vars: list[Var] / tuple[Var] (multiple outputs) or Var", "/ tuple[Var] (multiple outputs) or Var (single_output) is_new_var: True if", ".convert_utils import convert_graph from coremltools.converters.mil.mil import Builder as mb from", "= [get_new_symbol() if s is None or s == -1", 
"body_block1(%a.x, %b.x) { # %ret_axx = while_loop(loop_vars=(%a.x,)) # cond_block2(%a.x.x) {", "= tf.placeholder(tf.float32, shape=(1,)) # y = tf.placeholder(tf.float32, shape=(1,)) # c", "None, None if node.op == \"while\": func_x = node.attr[\"body_function\"] func_y", "self._get_tensor_name(n) not in output_nodes + all_nodes: raise KeyError('Output node name", "in zip(prog[\"main\"].outputs, self.outputs): if v_o.name != out_name and v_o.name not", "# node name: Placeholder_1 op type: Placeholder inputs: [] #", "for name in func_inputs.keys(): self.context.add(name, ssa_func.inputs[name]) outputs = convert_graph(self.context, graph,", "[] placeholder_names = tf_placeholder_names # name -> (shape, mil_type) mapping.", "func_inputs.keys(): self.context.add(name, ssa_func.inputs[name]) outputs = convert_graph(self.context, graph, self.outputs) ssa_func.set_outputs(outputs) prog.add_function(\"main\",", "be present\") if self.graph_stack[0] != \"main\": msg = \"TF root", "shape of input: {}.\" \\ \" Please provide its shape", "outputs = [x if isinstance(x, str) else x.name for x", "inputs: list of TensorType or ImageType, optional, defaults to None.", "is not None: # Check inputs format if not isinstance(inputs,", "= self._get_stack(tfssa, root=\"main\") self.context = TranscriptionContext() self.tensorflow_passes = tensorflow_passes def", "x = tf.placeholder(tf.float32, shape=(1,)) # y = tf.placeholder(tf.float32, shape=(1,)) #", "inputs should be list or tuple of TensorType or ImageType,", "{} new_outputs = [output for output in block.outputs] for i,", "not found in given tensorflow graph. Placeholders in graph are:", "shape = [get_new_symbol() if s is None or s ==", ") if inp.shape is None: shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) #", "is NOT necessarily a DAG. dep = {x: [] for", "sequentially. 
self.graph_stack = self._get_stack(tfssa, root=\"main\") self.context = TranscriptionContext() self.tensorflow_passes =", "name not in user_input_names: placeholder_names.append(name) else: inputs = [] placeholder_names", "# Observe that return node `while/Exit` is an output from", "if s is None or s == -1 else s", "NHWC, since NHWC is used # for TF unless GPU", "# list of length 1 # infer outputs if not", "} -> (%ret_ax, %ret_bx) # # During the translation of", "import is_symbolic from coremltools.converters.mil.mil.types import is_tensor from coremltools.converters.mil.mil import types", "if v_o.name not in name_counts: name_counts[v_o.name] = 1 else: name_counts[v_o.name]", "coremltools.converters.mil.mil import Function from .ssa_passes.tf_passes import tensorflow_passes from coremltools.converters._profile_utils import", "lambda i, j: (tf.add(i, 1), j) # res = tf.while_loop(c,", "# Copyright (c) 2020, Apple Inc. All rights reserved. #", "least one TF function must be present\") if self.graph_stack[0] !=", "...some ops # } -> (%bool_var1) # body_block1(%a.x, %b.x) {", "msg = \"TF var {} not found in context {}\"", "Stacked functions are translated to nested # blocks in Program,", "--> tfssa transcription. class TranscriptionContext: def __init__(self, name=None): self.name =", "= [] if inputs is not None: # Check inputs", "of input: {}.\" \\ \" Please provide its shape during", "main(%Placeholder: (5,fp32)) { block3() { } -> (%Placeholder) } But", "= lambda i, j: (tf.add(i, 1), j) # res =", "in input_name or output.name == output_name: new_output = output else:", "mapping. 
shape has type list[int] added_inputs = {} for inp", "= name if name is not None else \"\" self.context", "graph[n].op == \"Placeholder\"] placeholder_names = [] if inputs is not", "ssa_vars: list[Var] / tuple[Var] (multiple outputs) or Var (single_output) is_new_var:", "(single_output) is_new_var: True if ssa_vars are newly created for tf_name.", ") v_o.name = out_name self.check_placeholder_output(prog, self.outputs) @_profile def convert(self): prog", "(%ret_ax, %ret_bx) # # During the translation of cond_block2, we'd", "if graph_name not in self.graphs: msg = \"Graph '{}' not", "input_names = [x.name for x in self.inputs] for v_o, out_name", "TF op's name. # # Example: # # TF code:", "as ssa_func: # Get the input Var for name in", "# body_block2(%a.x.x) { # ...some ops # } -> (%new_a.x.x)", "of str or str, optional, defaults to None. A list", "the cases where placeholder is output. There is a case", "convert_main_graph(self, prog, graph): func_inputs = {} for input_type in self.inputs:", "inputs[0].name is None: inputs[0].name = tf_placeholder_names[0] # filter out those", "# # node name: Placeholder op type: Placeholder inputs: []", "output.name not in input_name or output.name == output_name: new_output =", "figure out shapes changes during iterates) msg = \"TF var", "user_input_names: placeholder_names.append(name) else: inputs = [] placeholder_names = tf_placeholder_names #", "\"Type of inputs should be list or tuple, got {}", "ret = tensor.name return ret.split(\":\")[0] def _validate_outputs(self, tfssa, outputs): if", "to extract the output information from TensorFlow model. 
\"\"\" self.tfssa", "output in block.outputs] for i, v_o in enumerate(block.outputs): if v_o.name", "from coremltools.converters.mil.mil import Function from .ssa_passes.tf_passes import tensorflow_passes from coremltools.converters._profile_utils", "+= list(f.graph.keys()) for n in outputs: if self._get_tensor_name(n) not in", "output var: '{}' -> '{}'\".format(v_o.name, out_name) ) v_o.name = out_name", "self.context = TranscriptionContext() self.tensorflow_passes = tensorflow_passes def _get_placeholder_shape_from_tf_graph(self, tfgraph, name):", "node.attr[\"_output_shapes\"] = [shape] # list of length 1 # infer", "if isinstance(x, str) else x.name for x in outputs] self.outputs", "n in outputs: if self._get_tensor_name(n) not in output_nodes + all_nodes:", "!= ssa_vars.name: msg = ( \"MIL op's name ({}) does", "that return node `while/Exit` is an output from get_tuple, #", "self.context = {} self.graphs = {} # TF loops are", "f in tfssa.functions.values(): output_nodes += list(f.outputs) all_nodes = [] for", "if inp.name not in tf_placeholder_names: raise ValueError( \"Input ({}) provided", "if graph[n].op == \"Placeholder\"] placeholder_names = [] if inputs is", "# ...some ops # } -> (%bool_var2) # body_block2(%a.x.x) {", "== \"while\": func_x = node.attr[\"body_function\"] func_y = node.attr[\"cond_function\"] if func_x", "name. This is needed when the last op doesn't #", "add_graph(self, graph_name, graph): self.graphs[graph_name] = graph def get_graph(self, graph_name): if", "# This is NOT necessarily a DAG. dep = {x:", "input name was not provided\" ) if inp.name not in", "and tf_name != ssa_vars.name: msg = ( \"MIL op's name", "Check inputs format if not isinstance(inputs, (list, tuple)): raise ValueError(", "-1 else s \\ for s in shape] inputs.append(TensorType(name=inp, shape=shape,", "conversion, using \\n\" \\ \"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])\".format(name, name) if", "from TensorFlow model. 
\"\"\" self.tfssa = tfssa self.global_type = {}", "in Program, like # # while_loop(loop_vars=(%a, %b)) # cond_block1(%a.x, %b.x)", "out those inputs which is not in tf_placeholder_names inputs =", "if there's only 1 input and 1 placeholder, we match", "Overriding previous value\" logging.info(msg % tf_name) if is_new_var and isinstance(ssa_vars,", "shape def _get_stack(self, tfssa, root=\"main\"): # We're trying to get", "-> '{}'\".format(v_o.name, out_name) ) v_o.name = out_name self.check_placeholder_output(prog, self.outputs) @_profile", "names tf_placeholder_names = [n for n in graph if graph[n].op", "while_loop body twice (which is # needed to figure out", "# node name: Placeholder op type: Placeholder inputs: [] #", "different name than the last TF op's name. # #", "if isinstance(outputs, (tuple, list)) else [outputs] output_nodes = [] for", "for f in tfssa.functions.values(): output_nodes += list(f.outputs) all_nodes = []", "and thus the # last Var would have a different", "= \"Graph '{}' not found in: {}\" raise KeyError(msg.format(graph_name, list(self.graphs.keys())))", "\"TF root graph must be named 'main'. Got {}\" raise", "tuple, got {} instead.\".format( type(inputs) ) ) if not all([isinstance(i,", "%s is added again. Overriding previous value\" logging.info(msg % tf_name)", "func_y and fname not in dep[func_y]: dep[func_y].append(fname) assert len(dep[root]) ==", "is None: inputs[idx].channel_first = False self.inputs = tuple(inputs) for inputtype", "coremltools.converters.mil.mil import Builder as mb from coremltools.converters.mil.mil import Program from", "previous value\" logging.info(msg % tf_name) if is_new_var and isinstance(ssa_vars, Var)", "the same name as the name passed to context.\" )", "[outputs] outputs = [x if isinstance(x, str) else x.name for", "TensorType or ImageType, optional, defaults to None. 
outputs: list of", "for node in tfssa.functions[fname].graph.values(): func_x, func_y = None, None if", "# # Example: # # TF code: # x =", "for s in inputtype.shape.shape]): continue node = graph[inputtype.name] shape =", "graph must be named 'main'. Got {}\" raise ValueError(msg.format(self.graph_stack[0])) graph", "logging.info( \"Renaming output var: '{}' -> '{}'\".format(v_o.name, out_name) ) v_o.name", "in tf_placeholder_names inputs = [x for x in inputs if", "\"\"\" self.tfssa = tfssa self.global_type = {} self.inputs = None", "inp.name, tf_placeholder_names ) ) if inp.shape is None: shape =", "body_block2(%a.x.x) { # ...some ops # } -> (%new_a.x.x) #", "prog, graph): func_inputs = {} for input_type in self.inputs: func_inputs[input_type.name]", "from while_0 SSA op. We need to # rename `while_0:0`", ") ) if not all([isinstance(i, InputType) for i in inputs]):", "if not provided self._validate_outputs(tfssa, outputs) outputs = main_func.outputs if outputs", "of length 1 # infer outputs if not provided self._validate_outputs(tfssa,", "in dep[func_y]: dep[func_y].append(fname) assert len(dep[root]) == 0 graph_stack = simple_topsort(dep)", "msg = \"Graph '{}' not found in: {}\" raise KeyError(msg.format(graph_name,", "model. 
\"\"\" self.tfssa = tfssa self.global_type = {} self.inputs =", ") # Special case: if there's only 1 input and", "prog[\"main\"] with block: name_counts = {} new_outputs = [output for", "the block output to Placeholder:0 by inserting an identity \"\"\"", "ret = tensor else: ret = tensor.name return ret.split(\":\")[0] def", "are translated to nested # blocks in Program, like #", "Observe that return node `while/Exit` is an output from get_tuple,", "# node name: while/Exit_1 op type: get_tuple inputs: ['while_0'] #", "shape=(1,)) # c = lambda i, j: \\ # tf.less(tf.math.reduce_mean(i),", "Note: sometimes two outputs are pointing to the same Var,", "type: get_tuple inputs: ['while_0'] # node name: while/Exit_1 op type:", "translation simply unpack a python tuple of Vars # ('while_0:0',", ") ) if inp.shape is None: shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name)", "tensorflow_passes def _get_placeholder_shape_from_tf_graph(self, tfgraph, name): error_message = \"Unable to determine", "self.graph_stack[1:]: self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) self.convert_main_graph(prog, graph) # Apply TF frontend passes", "if len(self.func_input_stack) == 0: raise ValueError(\"No func input available\") return", "x in self.inputs] for v_o, out_name in zip(prog[\"main\"].outputs, self.outputs): if", "1 # infer outputs if not provided self._validate_outputs(tfssa, outputs) outputs", "[x, y]) # # Resulting nodes (excluding the nodes in", "_profile # TranscriptionContext maintains a map of tf_node.name --> ssa_var", "prog, outputs_name): \"\"\" Handle the cases where placeholder is output.", "for fname in tfssa.functions: for node in tfssa.functions[fname].graph.values(): func_x, func_y", "== 0 graph_stack = simple_topsort(dep) return graph_stack @staticmethod def _get_tensor_name(tensor):", "op's name. 
# # Example: # # TF code: #", "self.tensorflow_passes = tensorflow_passes def _get_placeholder_shape_from_tf_graph(self, tfgraph, name): error_message = \"Unable", "self.inputs = None main_func = tfssa.functions[\"main\"] graph = main_func.graph #", "through the graphs. # This is NOT necessarily a DAG.", "func_y = None, None if node.op == \"while\": func_x =", "list(f.outputs) all_nodes = [] for f in tfssa.functions.values(): all_nodes +=", "def __contains__(self, tf_name): return tf_name in self.context class TFConverter: def", "defaults to None. A list of names of the output", "== 1 and len(inputs) == 1: if inputs[0].name is None:", "in self.inputs: if not isinstance(inputtype.shape, InputShape): continue if any([isinstance(s, RangeDim)", "isinstance(ssa_vars, Var) and tf_name != ssa_vars.name: msg = ( \"MIL", "# Special case: if there's only 1 input and 1", "name -> (shape, mil_type) mapping. shape has type list[int] added_inputs", "type: make_tuple inputs: ['Placeholder', # 'Placeholder_1'] # node name: while_0", "passes on Program. These passes are different # from passes", "for inp in main_func.inputs: if inp not in placeholder_names: continue", "a new Var (e.g., get_tuple, Identity etc.), and thus the", "output name. If None, the converter will try to extract", "inp in inputs] for name in tf_placeholder_names: if name not", "to get a order of how to loop through the", "None: inputs[0].name = tf_placeholder_names[0] # filter out those inputs which", "# Note: sometimes two outputs are pointing to the same", "is used # for TF unless GPU is specified as", "0 graph_stack = simple_topsort(dep) return graph_stack @staticmethod def _get_tensor_name(tensor): ret", "the inputs to only Placeholder names tf_placeholder_names = [n for", "None. A list of names of the output nodes or", "returned from while_0 SSA op. 
We need to # rename", "fill in shapes for user-specified input that doesn't have shape", "create mb.identity for those cases block = prog[\"main\"] with block:", "We need to change the block output to Placeholder:0 by", "outputs is None else outputs outputs = outputs if isinstance(outputs,", "# } -> (%bool_var2) # body_block2(%a.x.x) { # ...some ops", "input_type in self.inputs: func_inputs[input_type.name] = mb.placeholder( input_type.shape.symbolic_shape, dtype=input_type.dtype) prog.set_main_input_types(self.inputs) with", "s for s in inputtype.shape.shape] node.attr[\"_output_shapes\"] = [shape] # list", "from coremltools.converters.mil.mil import get_new_symbol from coremltools.converters.mil.mil.types.symbolic import is_symbolic from coremltools.converters.mil.mil.types", "tf_placeholder_names[0] # filter out those inputs which is not in", "ValueError( \"Input ({}) provided is not found in given tensorflow", "outputs outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]", "has type list[int] added_inputs = {} for inp in main_func.inputs:", "is like main(%Placeholder: (5,fp32)) { block3() { } -> (%Placeholder)", "not in self.graphs: msg = \"Graph '{}' not found in:", "becomes # stacked functions. Stacked functions are translated to nested", "def unstack_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError(\"No func input", "return self.graphs[graph_name] def stack_func_inputs(self, inputs): self.func_input_stack.append(inputs) def unstack_func_inputs(self): if len(self.func_input_stack)", "inputs = [x for x in inputs if x.name in", "from .basic_graph_ops import topsort, simple_topsort from .convert_utils import convert_graph from", "for users to find the # output. 
# Note: only", "tensor else: ret = tensor.name return ret.split(\":\")[0] def _validate_outputs(self, tfssa,", "like main(%Placeholder: (5,fp32)) { block3() { } -> (%Placeholder) }", "for g_name in self.graph_stack[1:]: self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) self.convert_main_graph(prog, graph) # Apply", "def _validate_outputs(self, tfssa, outputs): if outputs is None: return outputs", "InputShape): continue if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]): continue", "0: raise ValueError(\"No func input available\") return self.func_input_stack[-1] def __getitem__(self,", "graph = main_func.graph # Filter the inputs to only Placeholder", "new_output = mb.identity(x=output, name=output_name) new_outputs.append(new_output) block.set_outputs(new_outputs) def convert_main_graph(self, prog, graph):", "outputs to TF's name. This is needed when the last", "\"\" self.context = {} self.graphs = {} # TF loops", "# Check inputs existence if inp.name is None: raise ValueError(", "are represented as functions, so nested loops becomes # stacked", "ValueError( \"Unable to infer input's name or input name was", "not in dep[func_y]: dep[func_y].append(fname) assert len(dep[root]) == 0 graph_stack =", "isinstance(inputs, (list, tuple)): raise ValueError( \"Type of inputs should be", "in tfssa.functions[fname].graph.values(): func_x, func_y = None, None if node.op ==", "= tfgraph[name].attr[\"shape\"] elif tfgraph[name].attr.get(\"_output_shapes\", None) is not None: shape =", "= tuple(inputs) for inputtype in self.inputs: if not isinstance(inputtype.shape, InputShape):", "context must have the same name as the name passed", "else: name_counts[v_o.name] += 1 new_name = v_o.name + \"_duplicate_\" +", "is_tensor from coremltools.converters.mil.mil import types from .basic_graph_ops import topsort, simple_topsort", "converter will try to extract the output information from TensorFlow", "in given tensorflow graph. 
Placeholders in graph are: {}\".format( inp.name,", "lambda i, j: \\ # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) # b =", "if the output is not Placeholder. input_names = [x.name for", "len(tf_placeholder_names) == 1 and len(inputs) == 1: if inputs[0].name is", "output # Note: sometimes two outputs are pointing to the", "we run conversion sequentially. self.graph_stack = self._get_stack(tfssa, root=\"main\") self.context =", "s \\ for s in shape] inp.shape = _get_shaping_class(shape) #", "is None else outputs outputs = outputs if isinstance(outputs, (tuple,", "root=\"main\"): # We're trying to get a order of how", "TF unless GPU is specified as device. if isinstance(inp, ImageType)", "import types from .basic_graph_ops import topsort, simple_topsort from .convert_utils import", "self.inputs] for v_o, out_name in zip(prog[\"main\"].outputs, self.outputs): if v_o.name !=", "name=None): self.name = name if name is not None else", "= ssa_vars def add_graph(self, graph_name, graph): self.graphs[graph_name] = graph def", "= mb.identity(x=output, name=output_name) new_outputs.append(new_output) block.set_outputs(new_outputs) def convert_main_graph(self, prog, graph): func_inputs", "sometimes two outputs are pointing to the same Var, we", "`while/Exit` in order for users to find the # output.", "dtype=dtype)) added_inputs[inp] = (shape, dtype) if len(added_inputs) > 0: logging.info(", "# last Var would have a different name than the", "# cond_block2(%a.x.x) { # ...some ops # } -> (%bool_var2)", "outputs = main_func.outputs if outputs is None else outputs outputs", "self.context class TFConverter: def __init__(self, tfssa, inputs=None, outputs=None, **kwargs): \"\"\"", "({}) provided is not found in given tensorflow graph. Placeholders", "Placeholder. 
input_names = [x.name for x in self.inputs] for v_o,", "\"while\": func_x = node.attr[\"body_function\"] func_y = node.attr[\"cond_function\"] if func_x and", "used # for TF unless GPU is specified as device.", "inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) added_inputs[inp] = (shape, dtype) if len(added_inputs) >", "self.outputs) @_profile def convert(self): prog = Program() if len(self.graph_stack) ==", "y = tf.placeholder(tf.float32, shape=(1,)) # c = lambda i, j:", "LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause import logging from coremltools.converters.mil.input_types import", "== 0: raise ValueError(\"At least one TF function must be", "0: raise ValueError(\"No func input available\") self.func_input_stack.pop() def get_func_inputs(self): if", "to # rename `while_0:0` to `while/Exit` in order for users", "self.graph_stack = self._get_stack(tfssa, root=\"main\") self.context = TranscriptionContext() self.tensorflow_passes = tensorflow_passes", "graph_name): if graph_name not in self.graphs: msg = \"Graph '{}'", "\"Adding Input not specified by users: '{}'\".format( added_inputs) ) for", "program is like main(%Placeholder: (5,fp32)) { block3() { } ->", "RangeDim, _get_shaping_class, ) from coremltools.converters.mil.input_types import Shape as InputShape from", "[type(i) for i in inputs] ) ) # Special case:", "% tf_name) if is_new_var and isinstance(ssa_vars, Var) and tf_name !=", "names of the output nodes or a str for single", "= tensor else: ret = tensor.name return ret.split(\":\")[0] def _validate_outputs(self,", "is output. There is a case where the program is", "new Var (e.g., get_tuple, Identity etc.), and thus the #", "\"{}\" does exist.'.format(n)) def check_placeholder_output(self, prog, outputs_name): \"\"\" Handle the", "the output information from TensorFlow model. \"\"\" self.tfssa = tfssa", "try to extract the output information from TensorFlow model. 
\"\"\"", "output_nodes += list(f.outputs) all_nodes = [] for f in tfssa.functions.values():", "to Placeholder:0 by inserting an identity \"\"\" block = prog[\"main\"]", "created for tf_name. \"\"\" if tf_name in self.context: # Overriding", "are different # from passes applied to tfssa. self.tensorflow_passes(prog) return", "inputtype in self.inputs: if not isinstance(inputtype.shape, InputShape): continue if any([isinstance(s,", "if node.op == \"while\": func_x = node.attr[\"body_function\"] func_y = node.attr[\"cond_function\"]", "https://opensource.org/licenses/BSD-3-Clause import logging from coremltools.converters.mil.input_types import ( InputType, TensorType, ImageType,", "get_tuple, Identity etc.), and thus the # last Var would", "...some ops # } -> (%new_a.x.x) # } -> (%ret_axx)", "for n in graph if graph[n].op == \"Placeholder\"] placeholder_names =", "None if isinstance(tensor, str): ret = tensor else: ret =", "'{}' -> '{}'\".format(v_o.name, out_name) ) v_o.name = out_name self.check_placeholder_output(prog, self.outputs)", "# %ret_axx = while_loop(loop_vars=(%a.x,)) # cond_block2(%a.x.x) { # ...some ops", "if not all([isinstance(i, InputType) for i in inputs]): raise ValueError(", "in self.inputs: func_inputs[input_type.name] = mb.placeholder( input_type.shape.symbolic_shape, dtype=input_type.dtype) prog.set_main_input_types(self.inputs) with Function(func_inputs)", "`while_0:0` to `while/Exit` in order for users to find the", "{}\" raise KeyError(msg.format(tf_name, self.name)) return self.context[tf_name] def __contains__(self, tf_name): return", "identity \"\"\" block = prog[\"main\"] input_name = [x.name for x", "We're trying to get a order of how to loop", "We would like a stack so that we run conversion", "an output from get_tuple, # which in our translation simply", "not found in: {}\" raise KeyError(msg.format(graph_name, list(self.graphs.keys()))) return self.graphs[graph_name] def", "elif tfgraph[name].attr.get(\"_output_shapes\", None) is 
not None: shape = tfgraph[name].attr[\"_output_shapes\"][0] if", "new_name = v_o.name + \"_duplicate_\" + str(name_counts[v_o.name]) x = mb.identity(x=v_o,", "DAG. dep = {x: [] for x in tfssa.functions} for", "tf_placeholder_names ) ) if inp.shape is None: shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph,", "1 input and 1 placeholder, we match them. if len(tf_placeholder_names)", "if func_y and fname not in dep[func_y]: dep[func_y].append(fname) assert len(dep[root])", "inputs[idx].channel_first is None: inputs[idx].channel_first = False self.inputs = tuple(inputs) for", "= tf.while_loop(c, b, [x, y]) # # Resulting nodes (excluding", "%b.x) { # ...some ops # } -> (%bool_var1) #", "users didn't specify. user_input_names = [inp.name for inp in inputs]", "def convert(self): prog = Program() if len(self.graph_stack) == 0: raise", "\"Unable to infer input's name or input name was not", "self.inputs: if not isinstance(inputtype.shape, InputShape): continue if any([isinstance(s, RangeDim) for", "= \"TF var {} not found in context {}\" raise", "ret = None if isinstance(tensor, str): ret = tensor else:", "name ({}) does not match TensorFlow's node name ({}).\" \"", "ImageType) and inputs[idx].channel_first is None: inputs[idx].channel_first = False self.inputs =", "ops # } -> (%bool_var1) # body_block1(%a.x, %b.x) { #", "else s for s in inputtype.shape.shape] node.attr[\"_output_shapes\"] = [shape] #", "or tuple of TensorType or ImageType, got {} instead.\".format( [type(i)", "done. 
self.func_input_stack = [] # list of tuple[Var] def add(self,", "twice (which is # needed to figure out shapes changes", "graph[inputtype.name] shape = [-1 if is_symbolic(s) else s for s", "None) is not None: shape = tfgraph[name].attr[\"shape\"] elif tfgraph[name].attr.get(\"_output_shapes\", None)", "# # (%a.x.x,) # (%a.x, %b.x) # # where [%a.x.x]", "# c = lambda i, j: \\ # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j))", "name: while_0 op type: while inputs: ['make_input_0'] # node name:", "v_o.name + \"_duplicate_\" + str(name_counts[v_o.name]) x = mb.identity(x=v_o, name=new_name) new_outputs[i]", "inputs format if not isinstance(inputs, (list, tuple)): raise ValueError( \"Type", "provided is not found in given tensorflow graph. Placeholders in", "tf_name in self.context: # Overriding allow us to translate while_loop", "{} self.graphs = {} # TF loops are represented as", "__getitem__(self, tf_name): if tf_name not in self.context: msg = \"TF", "= [shape] # list of length 1 # infer outputs", "func_y = node.attr[\"cond_function\"] if func_x and fname not in dep[func_x]:", "TensorFlow IR. inputs: list of TensorType or ImageType, optional, defaults", "loops are represented as functions, so nested loops becomes #", "ImageType, optional, defaults to None. 
outputs: list of str or", "from get_tuple, # which in our translation simply unpack a", "= TranscriptionContext() self.tensorflow_passes = tensorflow_passes def _get_placeholder_shape_from_tf_graph(self, tfgraph, name): error_message", "mb from coremltools.converters.mil.mil import Program from coremltools.converters.mil.mil import Function from", "node name: Placeholder op type: Placeholder inputs: [] # node", "x in tfssa.functions} for fname in tfssa.functions: for node in", "case: if there's only 1 input and 1 placeholder, we", "passed to context.\" ) raise ValueError(msg.format(tf_name, ssa_vars.name)) self.context[tf_name] = ssa_vars", "shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) # _get_shaping_class does not accept -1", "inputs: ['make_input_0'] # node name: while/Exit op type: get_tuple inputs:", "inp.shape is None: shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) # _get_shaping_class does", "graph): self.graphs[graph_name] = graph def get_graph(self, graph_name): if graph_name not", "unstack_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError(\"No func input available\")", "var {} not found in context {}\" raise KeyError(msg.format(tf_name, self.name))", "== output_name: new_output = output else: new_output = mb.identity(x=output, name=output_name)", "get_tuple inputs: ['while_0'] # node name: while/Exit_1 op type: get_tuple", "be named 'main'. Got {}\" raise ValueError(msg.format(self.graph_stack[0])) graph = self.tfssa.functions[\"main\"].graph", "\"\"\" block = prog[\"main\"] input_name = [x.name for x in", "shapes for user-specified input that doesn't have shape for inp", "op type: Placeholder inputs: [] # node name: make_input_0 op", "Inc. All rights reserved. # # Use of this source", "once cond_block2 is done. self.func_input_stack = [] # list of", "i in inputs]): raise ValueError( \"Type of inputs should be", "would be unstacked once cond_block2 is done. 
self.func_input_stack = []", "outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs] outputs", "not accept -1 or None dimension. shape = [get_new_symbol() if", "str) else x.name for x in outputs] self.outputs = outputs", "msg = \"TF var %s is added again. Overriding previous", "name. If None, the converter will try to extract the", "# We set the default image format in TF as", "KeyError(msg.format(graph_name, list(self.graphs.keys()))) return self.graphs[graph_name] def stack_func_inputs(self, inputs): self.func_input_stack.append(inputs) def unstack_func_inputs(self):", "output_name in zip(block.outputs, outputs_name): if output.name not in input_name or", "coremltools.converters.mil.mil import get_new_symbol from coremltools.converters.mil.mil.types.symbolic import is_symbolic from coremltools.converters.mil.mil.types import", "function must be present\") if self.graph_stack[0] != \"main\": msg =", "those inputs which is not in tf_placeholder_names inputs = [x", "graph) # Apply TF frontend passes on Program. These passes", "TranscriptionContext() self.tensorflow_passes = tensorflow_passes def _get_placeholder_shape_from_tf_graph(self, tfgraph, name): error_message =", "s == -1 else s \\ for s in shape]", "(%bool_var1) # body_block1(%a.x, %b.x) { # %ret_axx = while_loop(loop_vars=(%a.x,)) #", "not in tf_placeholder_names: raise ValueError( \"Input ({}) provided is not", "not in input_names: logging.info( \"Renaming output var: '{}' -> '{}'\".format(v_o.name,", "or None dimension. 
shape = [get_new_symbol() if s is None", "TensorType or ImageType, got {} instead.\".format( [type(i) for i in", "['while_0'] # # Observe that return node `while/Exit` is an", "is_symbolic(s) else s for s in inputtype.shape.shape] node.attr[\"_output_shapes\"] = [shape]", "list or tuple, got {} instead.\".format( type(inputs) ) ) if", "code is governed by a BSD-3-clause license that can be", "...some ops # } -> (%bool_var2) # body_block2(%a.x.x) { #", "None else \"\" self.context = {} self.graphs = {} #", "inputs which is not in tf_placeholder_names inputs = [x for", "{}\".format( inp.name, tf_placeholder_names ) ) if inp.shape is None: shape", "type: get_tuple inputs: ['while_0'] # # Observe that return node", "ops # } -> (%new_a.x.x) # } -> (%ret_axx) #", "optional, defaults to None. A list of names of the", "tfssa self.global_type = {} self.inputs = None main_func = tfssa.functions[\"main\"]", "tuple[Var] def add(self, tf_name, ssa_vars, is_new_var=True): \"\"\" ssa_vars: list[Var] /", "tfssa: TensorFlow IR. inputs: list of TensorType or ImageType, optional,", "check_placeholder_output(self, prog, outputs_name): \"\"\" Handle the cases where placeholder is", "for i in inputs]): raise ValueError( \"Type of inputs should", "would like a stack so that we run conversion sequentially.", "[] for output, output_name in zip(block.outputs, outputs_name): if output.name not", "n in graph if graph[n].op == \"Placeholder\"] placeholder_names = []", "j: \\ # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) # b = lambda i,", "functions. Stacked functions are translated to nested # blocks in", "reserved. 
# # Use of this source code is governed", "prog.add_function(\"main\", ssa_func) # check duplicate output # Note: sometimes two", "tf_name): return tf_name in self.context class TFConverter: def __init__(self, tfssa,", "# Use of this source code is governed by a", "check duplicate output # Note: sometimes two outputs are pointing", "since NHWC is used # for TF unless GPU is", "self.tfssa.functions[\"main\"].graph for g_name in self.graph_stack[1:]: self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) self.convert_main_graph(prog, graph) #", "get a order of how to loop through the graphs.", "graph, self.outputs) ssa_func.set_outputs(outputs) prog.add_function(\"main\", ssa_func) # check duplicate output #", "return graph_stack @staticmethod def _get_tensor_name(tensor): ret = None if isinstance(tensor,", "list)) else [outputs] outputs = [x if isinstance(x, str) else", "1), j) # res = tf.while_loop(c, b, [x, y]) #", "specified as device. if isinstance(inp, ImageType) and inputs[idx].channel_first is None:", "continue node = graph[inputtype.name] shape = [-1 if is_symbolic(s) else", "s in shape] inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) added_inputs[inp] = (shape, dtype)", "But self.outputs = [\"Placeholder:0\"] We need to change the block", "its shape during conversion, using \\n\" \\ \"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_)", "__init__(self, tfssa, inputs=None, outputs=None, **kwargs): \"\"\" tfssa: TensorFlow IR. 
inputs:", "block = prog[\"main\"] input_name = [x.name for x in list(block.inputs.values())]", "there's only 1 input and 1 placeholder, we match them.", "get_tuple, # which in our translation simply unpack a python", "raise ValueError(error_message) else: raise ValueError(error_message) return shape def _get_stack(self, tfssa,", "make_input_0 op type: make_tuple inputs: ['Placeholder', # 'Placeholder_1'] # node", "= lambda i, j: \\ # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) # b", "list[int] added_inputs = {} for inp in main_func.inputs: if inp", "= graph[inp] dtype = node.attr['dtype'] shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) shape", "# blocks in Program, like # # while_loop(loop_vars=(%a, %b)) #", "and fname not in dep[func_y]: dep[func_y].append(fname) assert len(dep[root]) == 0", "for s in shape] inp.shape = _get_shaping_class(shape) # Extract placeholders", "ops using %ret_a # } -> (%ret_ax, %ret_bx) # #", "+= 1 new_name = v_o.name + \"_duplicate_\" + str(name_counts[v_o.name]) x", "import convert_graph from coremltools.converters.mil.mil import Builder as mb from coremltools.converters.mil.mil", "= False self.inputs = tuple(inputs) for inputtype in self.inputs: if", "node = graph[inputtype.name] shape = [-1 if is_symbolic(s) else s", "\"Input ({}) provided is not found in given tensorflow graph.", "Var would have a different name than the last TF", "= node.attr['dtype'] shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) shape = [get_new_symbol() if", "to `while/Exit` in order for users to find the #", "Placeholder names tf_placeholder_names = [n for n in graph if", "# name -> (shape, mil_type) mapping. 
shape has type list[int]", "in shapes for user-specified input that doesn't have shape for", "# (%a.x.x,) # (%a.x, %b.x) # # where [%a.x.x] would", "[x.name for x in self.inputs] for v_o, out_name in zip(prog[\"main\"].outputs,", "new_outputs = [output for output in block.outputs] for i, v_o", "placeholder_names.append(name) else: inputs = [] placeholder_names = tf_placeholder_names # name", "image format in TF as NHWC, since NHWC is used", "frontend passes on Program. These passes are different # from", "tf_name, ssa_vars, is_new_var=True): \"\"\" ssa_vars: list[Var] / tuple[Var] (multiple outputs)", "input Var for name in func_inputs.keys(): self.context.add(name, ssa_func.inputs[name]) outputs =", "b, [x, y]) # # Resulting nodes (excluding the nodes", "should # create mb.identity for those cases block = prog[\"main\"]", "isinstance(outputs, (tuple, list)) else [outputs] output_nodes = [] for f", "shape=(1,)) # y = tf.placeholder(tf.float32, shape=(1,)) # c = lambda", "graph if graph[n].op == \"Placeholder\"] placeholder_names = [] if inputs", "continue node = graph[inp] dtype = node.attr['dtype'] shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph,", "outputs are pointing to the same Var, we should #", "raise ValueError(\"No func input available\") return self.func_input_stack[-1] def __getitem__(self, tf_name):", "Input not specified by users: '{}'\".format( added_inputs) ) for idx,", "run conversion sequentially. 
self.graph_stack = self._get_stack(tfssa, root=\"main\") self.context = TranscriptionContext()", "in shape] inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) added_inputs[inp] = (shape, dtype) if", "coremltools.converters.mil.input_types import ( InputType, TensorType, ImageType, RangeDim, _get_shaping_class, ) from", "self.check_placeholder_output(prog, self.outputs) @_profile def convert(self): prog = Program() if len(self.graph_stack)", "InputType) for i in inputs]): raise ValueError( \"Type of inputs", "[] for f in tfssa.functions.values(): output_nodes += list(f.outputs) all_nodes =", "if self._get_tensor_name(n) not in output_nodes + all_nodes: raise KeyError('Output node", "must have the same name as the name passed to", "# Filter the inputs to only Placeholder names tf_placeholder_names =", "list[Var] / tuple[Var] (multiple outputs) or Var (single_output) is_new_var: True", "Check inputs existence if inp.name is None: raise ValueError( \"Unable", "ssa_vars are newly created for tf_name. \"\"\" if tf_name in", "would have a different name than the last TF op's", "one TF function must be present\") if self.graph_stack[0] != \"main\":", "tf_name) if is_new_var and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name:", "rename `while_0:0` to `while/Exit` in order for users to find", "to translate while_loop body twice (which is # needed to", "was not provided\" ) if inp.name not in tf_placeholder_names: raise", "return node `while/Exit` is an output from get_tuple, # which", "# Get the input Var for name in func_inputs.keys(): self.context.add(name,", "the output is not Placeholder. input_names = [x.name for x", "[n for n in graph if graph[n].op == \"Placeholder\"] placeholder_names", "True if ssa_vars are newly created for tf_name. 
\"\"\" if", "of inputs should be list or tuple of TensorType or", "0: logging.info( \"Adding Input not specified by users: '{}'\".format( added_inputs)", "c = lambda i, j: \\ # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) #", "if shape is None: raise ValueError(error_message) else: raise ValueError(error_message) return", "import Program from coremltools.converters.mil.mil import Function from .ssa_passes.tf_passes import tensorflow_passes", "defaults to None. outputs: list of str or str, optional,", "TF --> tfssa transcription. class TranscriptionContext: def __init__(self, name=None): self.name", "= tfgraph[name].attr[\"_output_shapes\"][0] if shape is None: raise ValueError(error_message) else: raise", "in tf_placeholder_names] # We fill in shapes for user-specified input", "the output if the output is not Placeholder. input_names =", "SSA op. We need to # rename `while_0:0` to `while/Exit`", "input that doesn't have shape for inp in inputs: #", "Get the input Var for name in func_inputs.keys(): self.context.add(name, ssa_func.inputs[name])", "= prog[\"main\"] with block: name_counts = {} new_outputs = [output", "= graph def get_graph(self, graph_name): if graph_name not in self.graphs:", "prog.set_main_input_types(self.inputs) with Function(func_inputs) as ssa_func: # Get the input Var", "convert_graph(self.context, graph, self.outputs) ssa_func.set_outputs(outputs) prog.add_function(\"main\", ssa_func) # check duplicate output", "not None: shape = tfgraph[name].attr[\"shape\"] elif tfgraph[name].attr.get(\"_output_shapes\", None) is not", "the same Var, we should # create mb.identity for those", "or ImageType, got {} instead.\".format( [type(i) for i in inputs]", "if isinstance(outputs, (tuple, list)) else [outputs] outputs = [x if", "name than the last TF op's name. # # Example:", "} -> (%ret_axx) # ....some ops using %ret_a # }", "inputs: ['while_0'] # node name: while/Exit_1 op type: get_tuple inputs:", "All rights reserved. 
# # Use of this source code", "dep[func_y]: dep[func_y].append(fname) assert len(dep[root]) == 0 graph_stack = simple_topsort(dep) return", "prog = Program() if len(self.graph_stack) == 0: raise ValueError(\"At least", "tfgraph[name].attr.get(\"_output_shapes\", None) is not None: shape = tfgraph[name].attr[\"_output_shapes\"][0] if shape", "RangeDim) for s in inputtype.shape.shape]): continue node = graph[inputtype.name] shape", "s in inputtype.shape.shape]): continue node = graph[inputtype.name] shape = [-1", "i in inputs] ) ) # Special case: if there's", "optional, defaults to None. outputs: list of str or str,", "j) # res = tf.while_loop(c, b, [x, y]) # #", "is done. self.func_input_stack = [] # list of tuple[Var] def", "# # Use of this source code is governed by", "if len(self.graph_stack) == 0: raise ValueError(\"At least one TF function", "for user-specified input that doesn't have shape for inp in", "inputs: [] # node name: make_input_0 op type: make_tuple inputs:", "while_0 SSA op. We need to # rename `while_0:0` to", "(%a.x, %b.x) # # where [%a.x.x] would be unstacked once", "[shape] # list of length 1 # infer outputs if", "where placeholder is output. There is a case where the", "thus the # last Var would have a different name", "# output. # Note: only rename the output if the", "in tfssa.functions.values(): all_nodes += list(f.graph.keys()) for n in outputs: if", "named 'main'. 
Got {}\" raise ValueError(msg.format(self.graph_stack[0])) graph = self.tfssa.functions[\"main\"].graph for", "\"MIL op's name ({}) does not match TensorFlow's node name", "v_o.name not in name_counts: name_counts[v_o.name] = 1 else: name_counts[v_o.name] +=", "same Var, we should # create mb.identity for those cases", "[-1 if is_symbolic(s) else s for s in inputtype.shape.shape] node.attr[\"_output_shapes\"]", "= prog[\"main\"] input_name = [x.name for x in list(block.inputs.values())] with", "= {x: [] for x in tfssa.functions} for fname in", "outputs] self.outputs = outputs # We would like a stack", "Node added to context must have the same name as", "tf.while_loop(c, b, [x, y]) # # Resulting nodes (excluding the", "# # while_loop(loop_vars=(%a, %b)) # cond_block1(%a.x, %b.x) { # ...some", "graph def get_graph(self, graph_name): if graph_name not in self.graphs: msg", "as mb from coremltools.converters.mil.mil import Program from coremltools.converters.mil.mil import Function", "= out_name self.check_placeholder_output(prog, self.outputs) @_profile def convert(self): prog = Program()", "be # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause", "self.inputs = tuple(inputs) for inputtype in self.inputs: if not isinstance(inputtype.shape,", "block.outputs] for i, v_o in enumerate(block.outputs): if v_o.name not in", "# # Observe that return node `while/Exit` is an output", "= [x.name for x in self.inputs] for v_o, out_name in", "if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]): continue node =", "for those cases block = prog[\"main\"] with block: name_counts =", "name: while/Exit op type: get_tuple inputs: ['while_0'] # node name:", "for x in tfssa.functions} for fname in tfssa.functions: for node", "self.global_type = {} self.inputs = None main_func = tfssa.functions[\"main\"] graph", "not None: shape = tfgraph[name].attr[\"_output_shapes\"][0] if shape is None: raise", "# filter out those inputs which is not in 
tf_placeholder_names", "return tf_name in self.context class TFConverter: def __init__(self, tfssa, inputs=None,", "_get_shaping_class(shape) # Extract placeholders that users didn't specify. user_input_names =", "input_name = [x.name for x in list(block.inputs.values())] with block: new_outputs", "the # last Var would have a different name than", "# b = lambda i, j: (tf.add(i, 1), j) #", "0: raise ValueError(\"At least one TF function must be present\")", "ops # } -> (%bool_var2) # body_block2(%a.x.x) { # ...some", "self.convert_main_graph(prog, graph) # Apply TF frontend passes on Program. These", "get_new_symbol from coremltools.converters.mil.mil.types.symbolic import is_symbolic from coremltools.converters.mil.mil.types import is_tensor from", "tf_placeholder_names # name -> (shape, mil_type) mapping. shape has type", "Resulting nodes (excluding the nodes in while loop cond &", "our translation simply unpack a python tuple of Vars #", "coremltools.converters._profile_utils import _profile # TranscriptionContext maintains a map of tf_node.name", "stacked functions. Stacked functions are translated to nested # blocks", "+ str(name_counts[v_o.name]) x = mb.identity(x=v_o, name=new_name) new_outputs[i] = x block.set_outputs(new_outputs)", "all_nodes = [] for f in tfssa.functions.values(): all_nodes += list(f.graph.keys())", "} -> (%new_a.x.x) # } -> (%ret_axx) # ....some ops", "in outputs: if self._get_tensor_name(n) not in output_nodes + all_nodes: raise", "only 1 input and 1 placeholder, we match them. if", "tf_placeholder_names inputs = [x for x in inputs if x.name", "with block: new_outputs = [] for output, output_name in zip(block.outputs,", "TranscriptionContext maintains a map of tf_node.name --> ssa_var available #", "# Check inputs format if not isinstance(inputs, (list, tuple)): raise", "None. 
outputs: list of str or str, optional, defaults to", "does exist.'.format(n)) def check_placeholder_output(self, prog, outputs_name): \"\"\" Handle the cases", "None: shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) # _get_shaping_class does not accept", "isinstance(x, str) else x.name for x in outputs] self.outputs =", "name_counts[v_o.name] += 1 new_name = v_o.name + \"_duplicate_\" + str(name_counts[v_o.name])", "= [] # list of tuple[Var] def add(self, tf_name, ssa_vars,", "(tuple, list)) else [outputs] outputs = [x if isinstance(x, str)", "TF as NHWC, since NHWC is used # for TF", "not in dep[func_x]: dep[func_x].append(fname) if func_y and fname not in", "Var, we should # create mb.identity for those cases block", "of inputs should be list or tuple, got {} instead.\".format(", "does not accept -1 or None dimension. shape = [get_new_symbol()", "block = prog[\"main\"] with block: name_counts = {} new_outputs =", "node.attr[\"body_function\"] func_y = node.attr[\"cond_function\"] if func_x and fname not in", "name if name is not None else \"\" self.context =", "func_x, func_y = None, None if node.op == \"while\": func_x", "different # from passes applied to tfssa. self.tensorflow_passes(prog) return prog", "_get_tensor_name(tensor): ret = None if isinstance(tensor, str): ret = tensor", "a DAG. dep = {x: [] for x in tfssa.functions}", "list(self.graphs.keys()))) return self.graphs[graph_name] def stack_func_inputs(self, inputs): self.func_input_stack.append(inputs) def unstack_func_inputs(self): if", "str, optional, defaults to None. 
A list of names of", "in self.graph_stack[1:]: self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) self.convert_main_graph(prog, graph) # Apply TF frontend", "outputs = convert_graph(self.context, graph, self.outputs) ssa_func.set_outputs(outputs) prog.add_function(\"main\", ssa_func) # check", "mb.identity(x=v_o, name=new_name) new_outputs[i] = x block.set_outputs(new_outputs) # Rename outputs to", "placeholders that users didn't specify. user_input_names = [inp.name for inp", "inputs to only Placeholder names tf_placeholder_names = [n for n", "func_inputs = {} for input_type in self.inputs: func_inputs[input_type.name] = mb.placeholder(", "raise KeyError(msg.format(tf_name, self.name)) return self.context[tf_name] def __contains__(self, tf_name): return tf_name", "if outputs is None else outputs outputs = outputs if", "exist.'.format(n)) def check_placeholder_output(self, prog, outputs_name): \"\"\" Handle the cases where", "changes during iterates) msg = \"TF var %s is added", "None: inputs[idx].channel_first = False self.inputs = tuple(inputs) for inputtype in", "Program, like # # while_loop(loop_vars=(%a, %b)) # cond_block1(%a.x, %b.x) {", "represented as functions, so nested loops becomes # stacked functions.", "{ # ...some ops # } -> (%bool_var2) # body_block2(%a.x.x)", "cond_block1(%a.x, %b.x) { # ...some ops # } -> (%bool_var1)", "# } -> (%bool_var1) # body_block1(%a.x, %b.x) { # %ret_axx", "as device. if isinstance(inp, ImageType) and inputs[idx].channel_first is None: inputs[idx].channel_first", "(%bool_var2) # body_block2(%a.x.x) { # ...some ops # } ->", "def add(self, tf_name, ssa_vars, is_new_var=True): \"\"\" ssa_vars: list[Var] / tuple[Var]", "in tfssa.functions.values(): output_nodes += list(f.outputs) all_nodes = [] for f", "return outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]", "def check_placeholder_output(self, prog, outputs_name): \"\"\" Handle the cases where placeholder", "-1 or None dimension. 
shape = [get_new_symbol() if s is", "during conversion, using \\n\" \\ \"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])\".format(name, name)", "This is NOT necessarily a DAG. dep = {x: []", "),])\".format(name, name) if tfgraph[name].attr.get(\"shape\", None) is not None: shape =", "device. if isinstance(inp, ImageType) and inputs[idx].channel_first is None: inputs[idx].channel_first =", "x.name for x in outputs] self.outputs = outputs # We", "shape = [-1 if is_symbolic(s) else s for s in", "None: return outputs = outputs if isinstance(outputs, (tuple, list)) else", "is specified as device. if isinstance(inp, ImageType) and inputs[idx].channel_first is", "while_loop(loop_vars=(%a.x,)) # cond_block2(%a.x.x) { # ...some ops # } ->", "placeholder, we match them. if len(tf_placeholder_names) == 1 and len(inputs)", "Program() if len(self.graph_stack) == 0: raise ValueError(\"At least one TF", "(list, tuple)): raise ValueError( \"Type of inputs should be list", "BSD-3-clause license that can be # found in the LICENSE.txt", "= 1 else: name_counts[v_o.name] += 1 new_name = v_o.name +", "unless GPU is specified as device. 
if isinstance(inp, ImageType) and", "-> (%bool_var2) # body_block2(%a.x.x) { # ...some ops # }", "= ( \"MIL op's name ({}) does not match TensorFlow's", "-> (%Placeholder) } But self.outputs = [\"Placeholder:0\"] We need to", "Overriding allow us to translate while_loop body twice (which is", "(%Placeholder) } But self.outputs = [\"Placeholder:0\"] We need to change", "\"TF var {} not found in context {}\" raise KeyError(msg.format(tf_name,", "TF code: # x = tf.placeholder(tf.float32, shape=(1,)) # y =", "s is None or s == -1 else s \\", "the translation of cond_block2, we'd have func_input_stack # # (%a.x.x,)", "fname in tfssa.functions: for node in tfssa.functions[fname].graph.values(): func_x, func_y =", "= graph[inputtype.name] shape = [-1 if is_symbolic(s) else s for", "dtype=input_type.dtype) prog.set_main_input_types(self.inputs) with Function(func_inputs) as ssa_func: # Get the input", "match TensorFlow's node name ({}).\" \" Warning: Node added to", "output_nodes + all_nodes: raise KeyError('Output node name \"{}\" does exist.'.format(n))", "isinstance(inp, ImageType) and inputs[idx].channel_first is None: inputs[idx].channel_first = False self.inputs", "only rename the output if the output is not Placeholder.", "by a BSD-3-clause license that can be # found in", "available\") self.func_input_stack.pop() def get_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError(\"No", "node.attr[\"cond_function\"] if func_x and fname not in dep[func_x]: dep[func_x].append(fname) if", "self.context: msg = \"TF var {} not found in context", "This is needed when the last op doesn't # generate", "= [x for x in inputs if x.name in tf_placeholder_names]", "tfssa transcription. 
class TranscriptionContext: def __init__(self, name=None): self.name = name", "all_nodes += list(f.graph.keys()) for n in outputs: if self._get_tensor_name(n) not", "Special case: if there's only 1 input and 1 placeholder,", "output, output_name in zip(block.outputs, outputs_name): if output.name not in input_name", "in while loop cond & body): # # node name:", "in inputs] ) ) # Special case: if there's only", "users: '{}'\".format( added_inputs) ) for idx, inp in enumerate(inputs): #", "for output in block.outputs] for i, v_o in enumerate(block.outputs): if", "tf_placeholder_names] # We fill in shapes for user-specified input that", "# } -> (%new_a.x.x) # } -> (%ret_axx) # ....some", "%ret_a # } -> (%ret_ax, %ret_bx) # # During the", "from coremltools.converters.mil.input_types import ( InputType, TensorType, ImageType, RangeDim, _get_shaping_class, )", "KeyError(msg.format(tf_name, self.name)) return self.context[tf_name] def __contains__(self, tf_name): return tf_name in", "Placeholder:0 by inserting an identity \"\"\" block = prog[\"main\"] input_name", "output.name == output_name: new_output = output else: new_output = mb.identity(x=output,", "{} # TF loops are represented as functions, so nested", "in TF as NHWC, since NHWC is used # for", "self.tfssa = tfssa self.global_type = {} self.inputs = None main_func", "None, the converter will try to extract the output information", "op. We need to # rename `while_0:0` to `while/Exit` in", "'main'. Got {}\" raise ValueError(msg.format(self.graph_stack[0])) graph = self.tfssa.functions[\"main\"].graph for g_name", "maintains a map of tf_node.name --> ssa_var available # to", "TF's name. This is needed when the last op doesn't", "simple_topsort(dep) return graph_stack @staticmethod def _get_tensor_name(tensor): ret = None if", "added again. 
Overriding previous value\" logging.info(msg % tf_name) if is_new_var", "msg = ( \"MIL op's name ({}) does not match", "coremltools.converters.mil.mil.var import Var from coremltools.converters.mil.mil import get_new_symbol from coremltools.converters.mil.mil.types.symbolic import", "Var for name in func_inputs.keys(): self.context.add(name, ssa_func.inputs[name]) outputs = convert_graph(self.context,", "set the default image format in TF as NHWC, since", "body): # # node name: Placeholder op type: Placeholder inputs:", "== 0: raise ValueError(\"No func input available\") self.func_input_stack.pop() def get_func_inputs(self):", "is added again. Overriding previous value\" logging.info(msg % tf_name) if", "node name ({}).\" \" Warning: Node added to context must", "in self.context: # Overriding allow us to translate while_loop body", "tf.math.reduce_mean(j)) # b = lambda i, j: (tf.add(i, 1), j)", "{} for inp in main_func.inputs: if inp not in placeholder_names:", "= outputs if isinstance(outputs, (tuple, list)) else [outputs] outputs =", "= \"Unable to determine the shape of input: {}.\" \\", "IR. inputs: list of TensorType or ImageType, optional, defaults to", "graphs. # This is NOT necessarily a DAG. dep =", "in context {}\" raise KeyError(msg.format(tf_name, self.name)) return self.context[tf_name] def __contains__(self,", "{} instead.\".format( [type(i) for i in inputs] ) ) #", "Var) and tf_name != ssa_vars.name: msg = ( \"MIL op's", "\\ for s in shape] inp.shape = _get_shaping_class(shape) # Extract", "raise ValueError(\"No func input available\") self.func_input_stack.pop() def get_func_inputs(self): if len(self.func_input_stack)", "op's name ({}) does not match TensorFlow's node name ({}).\"", "cond_block2 is done. 
self.func_input_stack = [] # list of tuple[Var]", "out_name) ) v_o.name = out_name self.check_placeholder_output(prog, self.outputs) @_profile def convert(self):", "name \"{}\" does exist.'.format(n)) def check_placeholder_output(self, prog, outputs_name): \"\"\" Handle", "} -> (%bool_var2) # body_block2(%a.x.x) { # ...some ops #", "and inputs[idx].channel_first is None: inputs[idx].channel_first = False self.inputs = tuple(inputs)", "dimension. shape = [get_new_symbol() if s is None or s", "the shape of input: {}.\" \\ \" Please provide its", "Identity etc.), and thus the # last Var would have", "root=\"main\") self.context = TranscriptionContext() self.tensorflow_passes = tensorflow_passes def _get_placeholder_shape_from_tf_graph(self, tfgraph,", "got {} instead.\".format( type(inputs) ) ) if not all([isinstance(i, InputType)", "and len(inputs) == 1: if inputs[0].name is None: inputs[0].name =", "the last TF op's name. # # Example: # #", "list of TensorType or ImageType, optional, defaults to None. outputs:", "allow us to translate while_loop body twice (which is #", "__contains__(self, tf_name): return tf_name in self.context class TFConverter: def __init__(self,", "(e.g., get_tuple, Identity etc.), and thus the # last Var", "to TF's name. This is needed when the last op", "ValueError(error_message) else: raise ValueError(error_message) return shape def _get_stack(self, tfssa, root=\"main\"):", "for x in self.inputs] for v_o, out_name in zip(prog[\"main\"].outputs, self.outputs):", "raise KeyError(msg.format(graph_name, list(self.graphs.keys()))) return self.graphs[graph_name] def stack_func_inputs(self, inputs): self.func_input_stack.append(inputs) def", "= \"TF var %s is added again. Overriding previous value\"", "shape=shape, dtype=dtype)) added_inputs[inp] = (shape, dtype) if len(added_inputs) > 0:", "current TF --> tfssa transcription. 
class TranscriptionContext: def __init__(self, name=None):", "x = mb.identity(x=v_o, name=new_name) new_outputs[i] = x block.set_outputs(new_outputs) # Rename", "is_new_var: True if ssa_vars are newly created for tf_name. \"\"\"", "None: shape = tfgraph[name].attr[\"shape\"] elif tfgraph[name].attr.get(\"_output_shapes\", None) is not None:", "for s in inputtype.shape.shape] node.attr[\"_output_shapes\"] = [shape] # list of", "are newly created for tf_name. \"\"\" if tf_name in self.context:", ".ssa_passes.tf_passes import tensorflow_passes from coremltools.converters._profile_utils import _profile # TranscriptionContext maintains", "infer input's name or input name was not provided\" )", "nodes (excluding the nodes in while loop cond & body):", "= {} new_outputs = [output for output in block.outputs] for", "node name: while/Exit_1 op type: get_tuple inputs: ['while_0'] # #", "and 1 placeholder, we match them. if len(tf_placeholder_names) == 1", "from coremltools.converters.mil.mil import types from .basic_graph_ops import topsort, simple_topsort from", "# } -> (%ret_axx) # ....some ops using %ret_a #", "inputs: ['while_0'] # # Observe that return node `while/Exit` is", "transcription. class TranscriptionContext: def __init__(self, name=None): self.name = name if", "not Placeholder. input_names = [x.name for x in self.inputs] for", "len(inputs) == 1: if inputs[0].name is None: inputs[0].name = tf_placeholder_names[0]", "\"_duplicate_\" + str(name_counts[v_o.name]) x = mb.identity(x=v_o, name=new_name) new_outputs[i] = x", "as NHWC, since NHWC is used # for TF unless", "convert(self): prog = Program() if len(self.graph_stack) == 0: raise ValueError(\"At", "== -1 else s \\ for s in shape] inp.shape", "error_message = \"Unable to determine the shape of input: {}.\"", "value\" logging.info(msg % tf_name) if is_new_var and isinstance(ssa_vars, Var) and", "so that we run conversion sequentially. 
self.graph_stack = self._get_stack(tfssa, root=\"main\")", "Placeholder_1 op type: Placeholder inputs: [] # node name: make_input_0", "output_name: new_output = output else: new_output = mb.identity(x=output, name=output_name) new_outputs.append(new_output)", "(shape, mil_type) mapping. shape has type list[int] added_inputs = {}", "name was not provided\" ) if inp.name not in tf_placeholder_names:", "root graph must be named 'main'. Got {}\" raise ValueError(msg.format(self.graph_stack[0]))", "inputs: # Check inputs existence if inp.name is None: raise", "that doesn't have shape for inp in inputs: # Check", "# # During the translation of cond_block2, we'd have func_input_stack", "is None: return outputs = outputs if isinstance(outputs, (tuple, list))", "in tf_placeholder_names: if name not in user_input_names: placeholder_names.append(name) else: inputs", "import topsort, simple_topsort from .convert_utils import convert_graph from coremltools.converters.mil.mil import", "raise KeyError('Output node name \"{}\" does exist.'.format(n)) def check_placeholder_output(self, prog,", "list of tuple[Var] def add(self, tf_name, ssa_vars, is_new_var=True): \"\"\" ssa_vars:", "from coremltools.converters.mil.mil import Program from coremltools.converters.mil.mil import Function from .ssa_passes.tf_passes", "name=inp) shape = [get_new_symbol() if s is None or s", "ret.split(\":\")[0] def _validate_outputs(self, tfssa, outputs): if outputs is None: return", "have a different name than the last TF op's name.", "= tfssa self.global_type = {} self.inputs = None main_func =", "# Example: # # TF code: # x = tf.placeholder(tf.float32,", "[] # node name: make_input_0 op type: make_tuple inputs: ['Placeholder',", "user-specified input that doesn't have shape for inp in inputs:", "coremltools.converters.mil.input_types import Shape as InputShape from coremltools.converters.mil.mil.var import Var from", "raise ValueError( \"Input ({}) provided is not found in given", 
"tf.placeholder(tf.float32, shape=(1,)) # c = lambda i, j: \\ #", "1 new_name = v_o.name + \"_duplicate_\" + str(name_counts[v_o.name]) x =", "ssa_vars def add_graph(self, graph_name, graph): self.graphs[graph_name] = graph def get_graph(self,", "main_func.inputs: if inp not in placeholder_names: continue node = graph[inp]", "inputs[0].name = tf_placeholder_names[0] # filter out those inputs which is", "ssa_func: # Get the input Var for name in func_inputs.keys():", "found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause import logging", "cond_block2(%a.x.x) { # ...some ops # } -> (%bool_var2) #", "[x.name for x in list(block.inputs.values())] with block: new_outputs = []", "this source code is governed by a BSD-3-clause license that", "to loop through the graphs. # This is NOT necessarily", "for inputtype in self.inputs: if not isinstance(inputtype.shape, InputShape): continue if", "in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause import logging from", "stack so that we run conversion sequentially. self.graph_stack = self._get_stack(tfssa,", "inp in enumerate(inputs): # We set the default image format", "shape is None: raise ValueError(error_message) else: raise ValueError(error_message) return shape", "to change the block output to Placeholder:0 by inserting an", "{} instead.\".format( type(inputs) ) ) if not all([isinstance(i, InputType) for", "than the last TF op's name. # # Example: #", "tuple[Var] (multiple outputs) or Var (single_output) is_new_var: True if ssa_vars", "== \"Placeholder\"] placeholder_names = [] if inputs is not None:", "return shape def _get_stack(self, tfssa, root=\"main\"): # We're trying to", "== 0: raise ValueError(\"No func input available\") return self.func_input_stack[-1] def", "for tf_name. 
\"\"\" if tf_name in self.context: # Overriding allow", "x.name in tf_placeholder_names] # We fill in shapes for user-specified", "= output else: new_output = mb.identity(x=output, name=output_name) new_outputs.append(new_output) block.set_outputs(new_outputs) def", "= self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) # _get_shaping_class does not accept -1 or", "trying to get a order of how to loop through", "# generate a new Var (e.g., get_tuple, Identity etc.), and", "(multiple outputs) or Var (single_output) is_new_var: True if ssa_vars are", "v_o.name = out_name self.check_placeholder_output(prog, self.outputs) @_profile def convert(self): prog =", "from coremltools.converters.mil.mil import Builder as mb from coremltools.converters.mil.mil import Program", "self.outputs) ssa_func.set_outputs(outputs) prog.add_function(\"main\", ssa_func) # check duplicate output # Note:", "format if not isinstance(inputs, (list, tuple)): raise ValueError( \"Type of", "from coremltools.converters.mil.input_types import Shape as InputShape from coremltools.converters.mil.mil.var import Var", "outputs: list of str or str, optional, defaults to None.", "type: Placeholder inputs: [] # node name: Placeholder_1 op type:", "out shapes changes during iterates) msg = \"TF var %s", "[] # node name: Placeholder_1 op type: Placeholder inputs: []", "tensor.name return ret.split(\":\")[0] def _validate_outputs(self, tfssa, outputs): if outputs is", "# TF code: # x = tf.placeholder(tf.float32, shape=(1,)) # y", "is not Placeholder. input_names = [x.name for x in self.inputs]", "\\n\" \\ \"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])\".format(name, name) if tfgraph[name].attr.get(\"shape\", None)", "main_func = tfssa.functions[\"main\"] graph = main_func.graph # Filter the inputs", "a stack so that we run conversion sequentially. 
self.graph_stack =", "mb.identity(x=output, name=output_name) new_outputs.append(new_output) block.set_outputs(new_outputs) def convert_main_graph(self, prog, graph): func_inputs =", "etc.), and thus the # last Var would have a", "outputs if isinstance(outputs, (tuple, list)) else [outputs] output_nodes = []", "simply unpack a python tuple of Vars # ('while_0:0', 'while_0:1')", "{}.\" \\ \" Please provide its shape during conversion, using", "or input name was not provided\" ) if inp.name not", "output. # Note: only rename the output if the output", "or Var (single_output) is_new_var: True if ssa_vars are newly created", "ImageType, got {} instead.\".format( [type(i) for i in inputs] )", "provide its shape during conversion, using \\n\" \\ \"'ct.convert(..., inputs=[ct.TensorType(name='{}',", "any([isinstance(s, RangeDim) for s in inputtype.shape.shape]): continue node = graph[inputtype.name]", "i, v_o in enumerate(block.outputs): if v_o.name not in name_counts: name_counts[v_o.name]", "python tuple of Vars # ('while_0:0', 'while_0:1') returned from while_0", "else \"\" self.context = {} self.graphs = {} # TF", "input available\") return self.func_input_stack[-1] def __getitem__(self, tf_name): if tf_name not", "nodes in while loop cond & body): # # node", "to only Placeholder names tf_placeholder_names = [n for n in", "@_profile def convert(self): prog = Program() if len(self.graph_stack) == 0:", "None) is not None: shape = tfgraph[name].attr[\"_output_shapes\"][0] if shape is", "%ret_bx) # # During the translation of cond_block2, we'd have", "fname not in dep[func_x]: dep[func_x].append(fname) if func_y and fname not", "inputs is not None: # Check inputs format if not", "# ...some ops # } -> (%new_a.x.x) # } ->", "\"\"\" ssa_vars: list[Var] / tuple[Var] (multiple outputs) or Var (single_output)", "instead.\".format( type(inputs) ) ) if not all([isinstance(i, InputType) for i", "inputs] for name in tf_placeholder_names: if name not in user_input_names:", "needed 
to figure out shapes changes during iterates) msg =", "self.graphs[graph_name] def stack_func_inputs(self, inputs): self.func_input_stack.append(inputs) def unstack_func_inputs(self): if len(self.func_input_stack) ==", "not None else \"\" self.context = {} self.graphs = {}", "NHWC is used # for TF unless GPU is specified", "can be # found in the LICENSE.txt file or at", "in output_nodes + all_nodes: raise KeyError('Output node name \"{}\" does", "output information from TensorFlow model. \"\"\" self.tfssa = tfssa self.global_type", "not provided self._validate_outputs(tfssa, outputs) outputs = main_func.outputs if outputs is", "coremltools.converters.mil.mil import Program from coremltools.converters.mil.mil import Function from .ssa_passes.tf_passes import", "else: ret = tensor.name return ret.split(\":\")[0] def _validate_outputs(self, tfssa, outputs):", "# Rename outputs to TF's name. This is needed when", "name_counts = {} new_outputs = [output for output in block.outputs]", "'{}'\".format( added_inputs) ) for idx, inp in enumerate(inputs): # We", "ValueError(error_message) return shape def _get_stack(self, tfssa, root=\"main\"): # We're trying", "Copyright (c) 2020, Apple Inc. All rights reserved. # #", "not in output_nodes + all_nodes: raise KeyError('Output node name \"{}\"", "# During the translation of cond_block2, we'd have func_input_stack #", "1 placeholder, we match them. if len(tf_placeholder_names) == 1 and", "found in given tensorflow graph. Placeholders in graph are: {}\".format(", "nodes or a str for single output name. 
If None,", "= tf.placeholder(tf.float32, shape=(1,)) # c = lambda i, j: \\", "only Placeholder names tf_placeholder_names = [n for n in graph", "enumerate(block.outputs): if v_o.name not in name_counts: name_counts[v_o.name] = 1 else:", "of tf_node.name --> ssa_var available # to the current TF", "= simple_topsort(dep) return graph_stack @staticmethod def _get_tensor_name(tensor): ret = None", "tfgraph[name].attr[\"_output_shapes\"][0] if shape is None: raise ValueError(error_message) else: raise ValueError(error_message)", "type: while inputs: ['make_input_0'] # node name: while/Exit op type:", "we'd have func_input_stack # # (%a.x.x,) # (%a.x, %b.x) #", "license that can be # found in the LICENSE.txt file", "(excluding the nodes in while loop cond & body): #", "the # output. # Note: only rename the output if", "a different name than the last TF op's name. #", "v_o in enumerate(block.outputs): if v_o.name not in name_counts: name_counts[v_o.name] =", "import Var from coremltools.converters.mil.mil import get_new_symbol from coremltools.converters.mil.mil.types.symbolic import is_symbolic", "A list of names of the output nodes or a", "== 1: if inputs[0].name is None: inputs[0].name = tf_placeholder_names[0] #", "add(self, tf_name, ssa_vars, is_new_var=True): \"\"\" ssa_vars: list[Var] / tuple[Var] (multiple", "tfgraph[name].attr[\"shape\"] elif tfgraph[name].attr.get(\"_output_shapes\", None) is not None: shape = tfgraph[name].attr[\"_output_shapes\"][0]", "Warning: Node added to context must have the same name", "governed by a BSD-3-clause license that can be # found", "2020, Apple Inc. All rights reserved. # # Use of", "single output name. If None, the converter will try to", "the name passed to context.\" ) raise ValueError(msg.format(tf_name, ssa_vars.name)) self.context[tf_name]", "not found in context {}\" raise KeyError(msg.format(tf_name, self.name)) return self.context[tf_name]", "msg = \"TF root graph must be named 'main'. 
Got", "None: raise ValueError(error_message) else: raise ValueError(error_message) return shape def _get_stack(self,", ") raise ValueError(msg.format(tf_name, ssa_vars.name)) self.context[tf_name] = ssa_vars def add_graph(self, graph_name,", "= {} self.graphs = {} # TF loops are represented", "return ret.split(\":\")[0] def _validate_outputs(self, tfssa, outputs): if outputs is None:", "information from TensorFlow model. \"\"\" self.tfssa = tfssa self.global_type =", "cases block = prog[\"main\"] with block: name_counts = {} new_outputs", "= [] for f in tfssa.functions.values(): output_nodes += list(f.outputs) all_nodes", "{ block3() { } -> (%Placeholder) } But self.outputs =", "i, j: \\ # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) # b = lambda", "class TFConverter: def __init__(self, tfssa, inputs=None, outputs=None, **kwargs): \"\"\" tfssa:", "Var from coremltools.converters.mil.mil import get_new_symbol from coremltools.converters.mil.mil.types.symbolic import is_symbolic from", "[output for output in block.outputs] for i, v_o in enumerate(block.outputs):", "self.context[tf_name] def __contains__(self, tf_name): return tf_name in self.context class TFConverter:", "else [outputs] output_nodes = [] for f in tfssa.functions.values(): output_nodes", "fname not in dep[func_y]: dep[func_y].append(fname) assert len(dep[root]) == 0 graph_stack", "stack_func_inputs(self, inputs): self.func_input_stack.append(inputs) def unstack_func_inputs(self): if len(self.func_input_stack) == 0: raise", "is not in tf_placeholder_names inputs = [x for x in", "op type: Placeholder inputs: [] # node name: Placeholder_1 op", "added_inputs = {} for inp in main_func.inputs: if inp not", "input_name or output.name == output_name: new_output = output else: new_output", "find the # output. 
# Note: only rename the output", "not in self.context: msg = \"TF var {} not found", "by users: '{}'\".format( added_inputs) ) for idx, inp in enumerate(inputs):", "placeholder_names = tf_placeholder_names # name -> (shape, mil_type) mapping. shape", "= [-1 if is_symbolic(s) else s for s in inputtype.shape.shape]", "var: '{}' -> '{}'\".format(v_o.name, out_name) ) v_o.name = out_name self.check_placeholder_output(prog,", "all([isinstance(i, InputType) for i in inputs]): raise ValueError( \"Type of", "in tfssa.functions} for fname in tfssa.functions: for node in tfssa.functions[fname].graph.values():", "in self.inputs] for v_o, out_name in zip(prog[\"main\"].outputs, self.outputs): if v_o.name", "duplicate output # Note: sometimes two outputs are pointing to", "inputs: [] # node name: Placeholder_1 op type: Placeholder inputs:", "shape during conversion, using \\n\" \\ \"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])\".format(name,", "KeyError('Output node name \"{}\" does exist.'.format(n)) def check_placeholder_output(self, prog, outputs_name):", "%ret_axx = while_loop(loop_vars=(%a.x,)) # cond_block2(%a.x.x) { # ...some ops #", "else s \\ for s in shape] inputs.append(TensorType(name=inp, shape=shape, dtype=dtype))", "so nested loops becomes # stacked functions. Stacked functions are", "with Function(func_inputs) as ssa_func: # Get the input Var for", "a order of how to loop through the graphs. #", "is governed by a BSD-3-clause license that can be #", "in inputs if x.name in tf_placeholder_names] # We fill in", "Note: only rename the output if the output is not", "op type: get_tuple inputs: ['while_0'] # # Observe that return", "return self.context[tf_name] def __contains__(self, tf_name): return tf_name in self.context class", "None else outputs outputs = outputs if isinstance(outputs, (tuple, list))", "have shape for inp in inputs: # Check inputs existence", "to find the # output. 
# Note: only rename the", "for TF unless GPU is specified as device. if isinstance(inp,", "g_name in self.graph_stack[1:]: self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) self.convert_main_graph(prog, graph) # Apply TF", "tf_name not in self.context: msg = \"TF var {} not", "# Apply TF frontend passes on Program. These passes are", "new_output = output else: new_output = mb.identity(x=output, name=output_name) new_outputs.append(new_output) block.set_outputs(new_outputs)", "tf_node.name --> ssa_var available # to the current TF -->", "block3() { } -> (%Placeholder) } But self.outputs = [\"Placeholder:0\"]", "outputs_name): if output.name not in input_name or output.name == output_name:", "{}\" raise KeyError(msg.format(graph_name, list(self.graphs.keys()))) return self.graphs[graph_name] def stack_func_inputs(self, inputs): self.func_input_stack.append(inputs)", "= node.attr[\"body_function\"] func_y = node.attr[\"cond_function\"] if func_x and fname not", "translation of cond_block2, we'd have func_input_stack # # (%a.x.x,) #", "available # to the current TF --> tfssa transcription. class", "specify. user_input_names = [inp.name for inp in inputs] for name", "in dep[func_x]: dep[func_x].append(fname) if func_y and fname not in dep[func_y]:", "for i in inputs] ) ) # Special case: if", "in main_func.inputs: if inp not in placeholder_names: continue node =", "the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause import logging from coremltools.converters.mil.input_types", "if not isinstance(inputs, (list, tuple)): raise ValueError( \"Type of inputs", "str): ret = tensor else: ret = tensor.name return ret.split(\":\")[0]", "is a case where the program is like main(%Placeholder: (5,fp32))", "as functions, so nested loops becomes # stacked functions. 
Stacked", "of cond_block2, we'd have func_input_stack # # (%a.x.x,) # (%a.x,", "at https://opensource.org/licenses/BSD-3-Clause import logging from coremltools.converters.mil.input_types import ( InputType, TensorType,", "len(self.func_input_stack) == 0: raise ValueError(\"No func input available\") return self.func_input_stack[-1]", "else outputs outputs = outputs if isinstance(outputs, (tuple, list)) else", "name) if tfgraph[name].attr.get(\"shape\", None) is not None: shape = tfgraph[name].attr[\"shape\"]", "to None. A list of names of the output nodes", "logging.info(msg % tf_name) if is_new_var and isinstance(ssa_vars, Var) and tf_name", "('while_0:0', 'while_0:1') returned from while_0 SSA op. We need to", "not all([isinstance(i, InputType) for i in inputs]): raise ValueError( \"Type", "def __init__(self, name=None): self.name = name if name is not", "cond & body): # # node name: Placeholder op type:", "node in tfssa.functions[fname].graph.values(): func_x, func_y = None, None if node.op", "if is_symbolic(s) else s for s in inputtype.shape.shape] node.attr[\"_output_shapes\"] =", "by inserting an identity \"\"\" block = prog[\"main\"] input_name =", "\" Please provide its shape during conversion, using \\n\" \\", "# We fill in shapes for user-specified input that doesn't", "is None: shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) # _get_shaping_class does not", "Placeholder inputs: [] # node name: make_input_0 op type: make_tuple", "in enumerate(block.outputs): if v_o.name not in name_counts: name_counts[v_o.name] = 1", "coremltools.converters.mil.mil.types.symbolic import is_symbolic from coremltools.converters.mil.mil.types import is_tensor from coremltools.converters.mil.mil import", "None if node.op == \"while\": func_x = node.attr[\"body_function\"] func_y =", "a str for single output name. 
If None, the converter", "Var (single_output) is_new_var: True if ssa_vars are newly created for", "[] for f in tfssa.functions.values(): all_nodes += list(f.graph.keys()) for n", "while/Exit op type: get_tuple inputs: ['while_0'] # node name: while/Exit_1", "coremltools.converters.mil.mil.types import is_tensor from coremltools.converters.mil.mil import types from .basic_graph_ops import", "+= list(f.outputs) all_nodes = [] for f in tfssa.functions.values(): all_nodes", "are: {}\".format( inp.name, tf_placeholder_names ) ) if inp.shape is None:", "def _get_placeholder_shape_from_tf_graph(self, tfgraph, name): error_message = \"Unable to determine the", "raise ValueError( \"Type of inputs should be list or tuple", "tfssa, outputs): if outputs is None: return outputs = outputs", "a map of tf_node.name --> ssa_var available # to the", "passes are different # from passes applied to tfssa. self.tensorflow_passes(prog)", "tf_placeholder_names = [n for n in graph if graph[n].op ==", "coremltools.converters.mil.mil import types from .basic_graph_ops import topsort, simple_topsort from .convert_utils", "if tf_name in self.context: # Overriding allow us to translate", "'Placeholder_1'] # node name: while_0 op type: while inputs: ['make_input_0']", "that can be # found in the LICENSE.txt file or", "not in name_counts: name_counts[v_o.name] = 1 else: name_counts[v_o.name] += 1", "Apply TF frontend passes on Program. 
These passes are different", "# ....some ops using %ret_a # } -> (%ret_ax, %ret_bx)", "self._validate_outputs(tfssa, outputs) outputs = main_func.outputs if outputs is None else", "tf_name != ssa_vars.name: msg = ( \"MIL op's name ({})", "output from get_tuple, # which in our translation simply unpack", "len(added_inputs) > 0: logging.info( \"Adding Input not specified by users:", "graph[inp] dtype = node.attr['dtype'] shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) shape =", "graph are: {}\".format( inp.name, tf_placeholder_names ) ) if inp.shape is", "func_input_stack # # (%a.x.x,) # (%a.x, %b.x) # # where", "cases where placeholder is output. There is a case where", "and fname not in dep[func_x]: dep[func_x].append(fname) if func_y and fname", "def _get_tensor_name(tensor): ret = None if isinstance(tensor, str): ret =", "block.set_outputs(new_outputs) # Rename outputs to TF's name. This is needed", "\"main\": msg = \"TF root graph must be named 'main'.", "len(self.func_input_stack) == 0: raise ValueError(\"No func input available\") self.func_input_stack.pop() def", "**kwargs): \"\"\" tfssa: TensorFlow IR. inputs: list of TensorType or", "self.outputs): if v_o.name != out_name and v_o.name not in input_names:", "= [\"Placeholder:0\"] We need to change the block output to", "= while_loop(loop_vars=(%a.x,)) # cond_block2(%a.x.x) { # ...some ops # }", "when the last op doesn't # generate a new Var", "ValueError(msg.format(self.graph_stack[0])) graph = self.tfssa.functions[\"main\"].graph for g_name in self.graph_stack[1:]: self.context.add_graph(g_name, self.tfssa.functions[g_name].graph)", "in self.graphs: msg = \"Graph '{}' not found in: {}\"", "shape=(_FILL_ME_) ),])\".format(name, name) if tfgraph[name].attr.get(\"shape\", None) is not None: shape", "tfssa, inputs=None, outputs=None, **kwargs): \"\"\" tfssa: TensorFlow IR. 
inputs: list", "inp.shape = _get_shaping_class(shape) # Extract placeholders that users didn't specify.", "Use of this source code is governed by a BSD-3-clause", "Builder as mb from coremltools.converters.mil.mil import Program from coremltools.converters.mil.mil import", "for name in tf_placeholder_names: if name not in user_input_names: placeholder_names.append(name)", "# We're trying to get a order of how to", "v_o.name != out_name and v_o.name not in input_names: logging.info( \"Renaming", "op doesn't # generate a new Var (e.g., get_tuple, Identity", "# while_loop(loop_vars=(%a, %b)) # cond_block1(%a.x, %b.x) { # ...some ops", "input's name or input name was not provided\" ) if", "InputShape from coremltools.converters.mil.mil.var import Var from coremltools.converters.mil.mil import get_new_symbol from", "# stacked functions. Stacked functions are translated to nested #", "in input_names: logging.info( \"Renaming output var: '{}' -> '{}'\".format(v_o.name, out_name)", "Please provide its shape during conversion, using \\n\" \\ \"'ct.convert(...,", "shapes changes during iterates) msg = \"TF var %s is", "There is a case where the program is like main(%Placeholder:", "# which in our translation simply unpack a python tuple", "in order for users to find the # output. #", "= outputs # We would like a stack so that", "= (shape, dtype) if len(added_inputs) > 0: logging.info( \"Adding Input", "import logging from coremltools.converters.mil.input_types import ( InputType, TensorType, ImageType, RangeDim,", "# ...some ops # } -> (%bool_var1) # body_block1(%a.x, %b.x)", "# # TF code: # x = tf.placeholder(tf.float32, shape=(1,)) #", "out_name self.check_placeholder_output(prog, self.outputs) @_profile def convert(self): prog = Program() if", "must be named 'main'. 
Got {}\" raise ValueError(msg.format(self.graph_stack[0])) graph =", "from coremltools.converters.mil.mil.types import is_tensor from coremltools.converters.mil.mil import types from .basic_graph_ops", "node name: Placeholder_1 op type: Placeholder inputs: [] # node", "func_x and fname not in dep[func_x]: dep[func_x].append(fname) if func_y and", "from coremltools.converters.mil.mil.var import Var from coremltools.converters.mil.mil import get_new_symbol from coremltools.converters.mil.mil.types.symbolic", "is_new_var and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name: msg =", "in name_counts: name_counts[v_o.name] = 1 else: name_counts[v_o.name] += 1 new_name", "node name \"{}\" does exist.'.format(n)) def check_placeholder_output(self, prog, outputs_name): \"\"\"", "# res = tf.while_loop(c, b, [x, y]) # # Resulting", "b = lambda i, j: (tf.add(i, 1), j) # res", "({}) does not match TensorFlow's node name ({}).\" \" Warning:", "rename the output if the output is not Placeholder. input_names", "dep[func_y].append(fname) assert len(dep[root]) == 0 graph_stack = simple_topsort(dep) return graph_stack", "doesn't # generate a new Var (e.g., get_tuple, Identity etc.),", "for n in outputs: if self._get_tensor_name(n) not in output_nodes +", "import is_tensor from coremltools.converters.mil.mil import types from .basic_graph_ops import topsort,", "present\") if self.graph_stack[0] != \"main\": msg = \"TF root graph", "order for users to find the # output. 
# Note:", "None: # Check inputs format if not isinstance(inputs, (list, tuple)):", "self.tfssa.functions[g_name].graph) self.convert_main_graph(prog, graph) # Apply TF frontend passes on Program.", "out_name in zip(prog[\"main\"].outputs, self.outputs): if v_o.name != out_name and v_o.name", "_get_placeholder_shape_from_tf_graph(self, tfgraph, name): error_message = \"Unable to determine the shape", "v_o.name not in input_names: logging.info( \"Renaming output var: '{}' ->", "translated to nested # blocks in Program, like # #", "if name is not None else \"\" self.context = {}", "is None or s == -1 else s \\ for", "order of how to loop through the graphs. # This", "= main_func.outputs if outputs is None else outputs outputs =", "None dimension. shape = [get_new_symbol() if s is None or", "not in tf_placeholder_names inputs = [x for x in inputs", "using %ret_a # } -> (%ret_ax, %ret_bx) # # During", "name as the name passed to context.\" ) raise ValueError(msg.format(tf_name,", "of the output nodes or a str for single output", "in: {}\" raise KeyError(msg.format(graph_name, list(self.graphs.keys()))) return self.graphs[graph_name] def stack_func_inputs(self, inputs):", "s \\ for s in shape] inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) added_inputs[inp]", "loops becomes # stacked functions. Stacked functions are translated to", "# cond_block1(%a.x, %b.x) { # ...some ops # } ->", "outputs if not provided self._validate_outputs(tfssa, outputs) outputs = main_func.outputs if", "\"Graph '{}' not found in: {}\" raise KeyError(msg.format(graph_name, list(self.graphs.keys()))) return", "= {} self.inputs = None main_func = tfssa.functions[\"main\"] graph =", "name in tf_placeholder_names: if name not in user_input_names: placeholder_names.append(name) else:", "self.context: # Overriding allow us to translate while_loop body twice", "output if the output is not Placeholder. 
input_names = [x.name", "ImageType, RangeDim, _get_shaping_class, ) from coremltools.converters.mil.input_types import Shape as InputShape", "Vars # ('while_0:0', 'while_0:1') returned from while_0 SSA op. We", "# (%a.x, %b.x) # # where [%a.x.x] would be unstacked", "s in shape] inp.shape = _get_shaping_class(shape) # Extract placeholders that", "[get_new_symbol() if s is None or s == -1 else", "the input Var for name in func_inputs.keys(): self.context.add(name, ssa_func.inputs[name]) outputs", "existence if inp.name is None: raise ValueError( \"Unable to infer", "functions, so nested loops becomes # stacked functions. Stacked functions", "get_graph(self, graph_name): if graph_name not in self.graphs: msg = \"Graph", "outputs: if self._get_tensor_name(n) not in output_nodes + all_nodes: raise KeyError('Output", "in tf_placeholder_names: raise ValueError( \"Input ({}) provided is not found", "!= \"main\": msg = \"TF root graph must be named", "__init__(self, name=None): self.name = name if name is not None", "\"Renaming output var: '{}' -> '{}'\".format(v_o.name, out_name) ) v_o.name =", "not provided\" ) if inp.name not in tf_placeholder_names: raise ValueError(", "# infer outputs if not provided self._validate_outputs(tfssa, outputs) outputs =", "the program is like main(%Placeholder: (5,fp32)) { block3() { }", "# # Resulting nodes (excluding the nodes in while loop", "\"Placeholder\"] placeholder_names = [] if inputs is not None: #", "self.graphs: msg = \"Graph '{}' not found in: {}\" raise", "default image format in TF as NHWC, since NHWC is", "specified by users: '{}'\".format( added_inputs) ) for idx, inp in", "of tuple[Var] def add(self, tf_name, ssa_vars, is_new_var=True): \"\"\" ssa_vars: list[Var]", "added to context must have the same name as the", "= {} for inp in main_func.inputs: if inp not in", "loop cond & body): # # node name: Placeholder op", "inp in inputs: # Check inputs existence if inp.name is", "found in: {}\" raise 
KeyError(msg.format(graph_name, list(self.graphs.keys()))) return self.graphs[graph_name] def stack_func_inputs(self,", "import Function from .ssa_passes.tf_passes import tensorflow_passes from coremltools.converters._profile_utils import _profile", "def __init__(self, tfssa, inputs=None, outputs=None, **kwargs): \"\"\" tfssa: TensorFlow IR.", "the nodes in while loop cond & body): # #", "ValueError(msg.format(tf_name, ssa_vars.name)) self.context[tf_name] = ssa_vars def add_graph(self, graph_name, graph): self.graphs[graph_name]", "name_counts: name_counts[v_o.name] = 1 else: name_counts[v_o.name] += 1 new_name =", "if self.graph_stack[0] != \"main\": msg = \"TF root graph must", "# Note: only rename the output if the output is", "case where the program is like main(%Placeholder: (5,fp32)) { block3()", "\"Type of inputs should be list or tuple of TensorType", "translate while_loop body twice (which is # needed to figure", "= _get_shaping_class(shape) # Extract placeholders that users didn't specify. 
user_input_names", "_get_stack(self, tfssa, root=\"main\"): # We're trying to get a order", "self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) # _get_shaping_class does not accept -1 or None", "self.func_input_stack.pop() def get_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError(\"No func", "(5,fp32)) { block3() { } -> (%Placeholder) } But self.outputs", "change the block output to Placeholder:0 by inserting an identity", "-> (%ret_ax, %ret_bx) # # During the translation of cond_block2,", "self.func_input_stack[-1] def __getitem__(self, tf_name): if tf_name not in self.context: msg", "main_func.outputs if outputs is None else outputs outputs = outputs", "= [] for output, output_name in zip(block.outputs, outputs_name): if output.name", "# tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) # b = lambda i, j: (tf.add(i,", "if len(self.func_input_stack) == 0: raise ValueError(\"No func input available\") self.func_input_stack.pop()", "if ssa_vars are newly created for tf_name. \"\"\" if tf_name", "is an output from get_tuple, # which in our translation", "name in func_inputs.keys(): self.context.add(name, ssa_func.inputs[name]) outputs = convert_graph(self.context, graph, self.outputs)", "def stack_func_inputs(self, inputs): self.func_input_stack.append(inputs) def unstack_func_inputs(self): if len(self.func_input_stack) == 0:", "ssa_vars, is_new_var=True): \"\"\" ssa_vars: list[Var] / tuple[Var] (multiple outputs) or", "a BSD-3-clause license that can be # found in the", "list of str or str, optional, defaults to None. 
A", "shape = tfgraph[name].attr[\"shape\"] elif tfgraph[name].attr.get(\"_output_shapes\", None) is not None: shape", "= outputs if isinstance(outputs, (tuple, list)) else [outputs] output_nodes =", "block: name_counts = {} new_outputs = [output for output in", "of this source code is governed by a BSD-3-clause license", "[] for x in tfssa.functions} for fname in tfssa.functions: for", "inp.name is None: raise ValueError( \"Unable to infer input's name", "not isinstance(inputs, (list, tuple)): raise ValueError( \"Type of inputs should", "} But self.outputs = [\"Placeholder:0\"] We need to change the", "determine the shape of input: {}.\" \\ \" Please provide", "or tuple, got {} instead.\".format( type(inputs) ) ) if not", "make_tuple inputs: ['Placeholder', # 'Placeholder_1'] # node name: while_0 op", "tuple)): raise ValueError( \"Type of inputs should be list or", "func input available\") self.func_input_stack.pop() def get_func_inputs(self): if len(self.func_input_stack) == 0:", "output nodes or a str for single output name. If", "ssa_vars.name)) self.context[tf_name] = ssa_vars def add_graph(self, graph_name, graph): self.graphs[graph_name] =", "nested loops becomes # stacked functions. Stacked functions are translated", "shape = tfgraph[name].attr[\"_output_shapes\"][0] if shape is None: raise ValueError(error_message) else:", "import ( InputType, TensorType, ImageType, RangeDim, _get_shaping_class, ) from coremltools.converters.mil.input_types", "node name: make_input_0 op type: make_tuple inputs: ['Placeholder', # 'Placeholder_1']", "filter out those inputs which is not in tf_placeholder_names inputs", "TF function must be present\") if self.graph_stack[0] != \"main\": msg", "dep = {x: [] for x in tfssa.functions} for fname", "in enumerate(inputs): # We set the default image format in", "import _profile # TranscriptionContext maintains a map of tf_node.name -->", "Rename outputs to TF's name. 
This is needed when the", "\\ # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) # b = lambda i, j:", "= [n for n in graph if graph[n].op == \"Placeholder\"]", "have the same name as the name passed to context.\"", "self.context.add(name, ssa_func.inputs[name]) outputs = convert_graph(self.context, graph, self.outputs) ssa_func.set_outputs(outputs) prog.add_function(\"main\", ssa_func)", "if inp.name is None: raise ValueError( \"Unable to infer input's", "InputType, TensorType, ImageType, RangeDim, _get_shaping_class, ) from coremltools.converters.mil.input_types import Shape", "how to loop through the graphs. # This is NOT", "{}\" raise ValueError(msg.format(self.graph_stack[0])) graph = self.tfssa.functions[\"main\"].graph for g_name in self.graph_stack[1:]:", "name: Placeholder_1 op type: Placeholder inputs: [] # node name:", "for x in list(block.inputs.values())] with block: new_outputs = [] for", "\\ \"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])\".format(name, name) if tfgraph[name].attr.get(\"shape\", None) is", "None: shape = tfgraph[name].attr[\"_output_shapes\"][0] if shape is None: raise ValueError(error_message)", "import Shape as InputShape from coremltools.converters.mil.mil.var import Var from coremltools.converters.mil.mil", "topsort, simple_topsort from .convert_utils import convert_graph from coremltools.converters.mil.mil import Builder", "@staticmethod def _get_tensor_name(tensor): ret = None if isinstance(tensor, str): ret", "inputs should be list or tuple, got {} instead.\".format( type(inputs)", "def __getitem__(self, tf_name): if tf_name not in self.context: msg =", "ValueError(\"At least one TF function must be present\") if self.graph_stack[0]", "of names of the output nodes or a str for", "be list or tuple, got {} instead.\".format( type(inputs) ) )", "to infer input's name or input name was not provided\"", "_get_shaping_class, ) from coremltools.converters.mil.input_types import Shape as InputShape 
from coremltools.converters.mil.mil.var", "or s == -1 else s \\ for s in", "# to the current TF --> tfssa transcription. class TranscriptionContext:", "them. if len(tf_placeholder_names) == 1 and len(inputs) == 1: if", "is needed when the last op doesn't # generate a", "{} for input_type in self.inputs: func_inputs[input_type.name] = mb.placeholder( input_type.shape.symbolic_shape, dtype=input_type.dtype)", "like a stack so that we run conversion sequentially. self.graph_stack", "# _get_shaping_class does not accept -1 or None dimension. shape", "= {} # TF loops are represented as functions, so", "[inp.name for inp in inputs] for name in tf_placeholder_names: if", "{ } -> (%Placeholder) } But self.outputs = [\"Placeholder:0\"] We", "file or at https://opensource.org/licenses/BSD-3-Clause import logging from coremltools.converters.mil.input_types import (", "to the same Var, we should # create mb.identity for", "we match them. if len(tf_placeholder_names) == 1 and len(inputs) ==", "placeholder_names: continue node = graph[inp] dtype = node.attr['dtype'] shape =", "outputs # We would like a stack so that we", "= Program() if len(self.graph_stack) == 0: raise ValueError(\"At least one", "(tf.add(i, 1), j) # res = tf.while_loop(c, b, [x, y])", "tf_placeholder_names: raise ValueError( \"Input ({}) provided is not found in", "from .ssa_passes.tf_passes import tensorflow_passes from coremltools.converters._profile_utils import _profile # TranscriptionContext", "ssa_func.set_outputs(outputs) prog.add_function(\"main\", ssa_func) # check duplicate output # Note: sometimes", "if output.name not in input_name or output.name == output_name: new_output", "must be present\") if self.graph_stack[0] != \"main\": msg = \"TF", "= [output for output in block.outputs] for i, v_o in", "# x = tf.placeholder(tf.float32, shape=(1,)) # y = tf.placeholder(tf.float32, shape=(1,))", "is # needed to figure out shapes changes during iterates)", "\"Unable to determine the shape of input: {}.\" 
\\ \"", "# We would like a stack so that we run", "inputs existence if inp.name is None: raise ValueError( \"Unable to", "simple_topsort from .convert_utils import convert_graph from coremltools.converters.mil.mil import Builder as", "# for TF unless GPU is specified as device. if", "= None, None if node.op == \"while\": func_x = node.attr[\"body_function\"]", "continue if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]): continue node", "shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) shape = [get_new_symbol() if s is", "j: (tf.add(i, 1), j) # res = tf.while_loop(c, b, [x,", "inputs: ['Placeholder', # 'Placeholder_1'] # node name: while_0 op type:", "= x block.set_outputs(new_outputs) # Rename outputs to TF's name. This", "dep[func_x].append(fname) if func_y and fname not in dep[func_y]: dep[func_y].append(fname) assert", "in inputtype.shape.shape]): continue node = graph[inputtype.name] shape = [-1 if", "Placeholder inputs: [] # node name: Placeholder_1 op type: Placeholder", "context.\" ) raise ValueError(msg.format(tf_name, ssa_vars.name)) self.context[tf_name] = ssa_vars def add_graph(self,", "tf_name in self.context class TFConverter: def __init__(self, tfssa, inputs=None, outputs=None,", "[] # list of tuple[Var] def add(self, tf_name, ssa_vars, is_new_var=True):", "\"\"\" Handle the cases where placeholder is output. 
There is", "Placeholder op type: Placeholder inputs: [] # node name: Placeholder_1", "using \\n\" \\ \"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])\".format(name, name) if tfgraph[name].attr.get(\"shape\",", "not in placeholder_names: continue node = graph[inp] dtype = node.attr['dtype']", "TF loops are represented as functions, so nested loops becomes", "outputs if isinstance(outputs, (tuple, list)) else [outputs] outputs = [x", "{ # %ret_axx = while_loop(loop_vars=(%a.x,)) # cond_block2(%a.x.x) { # ...some", "= main_func.graph # Filter the inputs to only Placeholder names", "is not None else \"\" self.context = {} self.graphs =", "!= out_name and v_o.name not in input_names: logging.info( \"Renaming output", "enumerate(inputs): # We set the default image format in TF", "name is not None else \"\" self.context = {} self.graphs", "name=inp.name) # _get_shaping_class does not accept -1 or None dimension.", "if len(tf_placeholder_names) == 1 and len(inputs) == 1: if inputs[0].name", "and v_o.name not in input_names: logging.info( \"Renaming output var: '{}'", "list(block.inputs.values())] with block: new_outputs = [] for output, output_name in", "# } -> (%ret_ax, %ret_bx) # # During the translation", "dtype) if len(added_inputs) > 0: logging.info( \"Adding Input not specified", "%b)) # cond_block1(%a.x, %b.x) { # ...some ops # }", "is not found in given tensorflow graph. Placeholders in graph", "match them. if len(tf_placeholder_names) == 1 and len(inputs) == 1:", "outputs_name): \"\"\" Handle the cases where placeholder is output. 
There", "if not isinstance(inputtype.shape, InputShape): continue if any([isinstance(s, RangeDim) for s", "in inputs] for name in tf_placeholder_names: if name not in", "not in user_input_names: placeholder_names.append(name) else: inputs = [] placeholder_names =", "Got {}\" raise ValueError(msg.format(self.graph_stack[0])) graph = self.tfssa.functions[\"main\"].graph for g_name in", "op type: while inputs: ['make_input_0'] # node name: while/Exit op", "or a str for single output name. If None, the", "new_outputs[i] = x block.set_outputs(new_outputs) # Rename outputs to TF's name.", "for input_type in self.inputs: func_inputs[input_type.name] = mb.placeholder( input_type.shape.symbolic_shape, dtype=input_type.dtype) prog.set_main_input_types(self.inputs)", "self.func_input_stack = [] # list of tuple[Var] def add(self, tf_name,", "if inp not in placeholder_names: continue node = graph[inp] dtype", "{} self.inputs = None main_func = tfssa.functions[\"main\"] graph = main_func.graph", "users to find the # output. # Note: only rename", "ssa_func.inputs[name]) outputs = convert_graph(self.context, graph, self.outputs) ssa_func.set_outputs(outputs) prog.add_function(\"main\", ssa_func) #", "[outputs] output_nodes = [] for f in tfssa.functions.values(): output_nodes +=", "name=output_name) new_outputs.append(new_output) block.set_outputs(new_outputs) def convert_main_graph(self, prog, graph): func_inputs = {}", "in inputs]): raise ValueError( \"Type of inputs should be list", "[\"Placeholder:0\"] We need to change the block output to Placeholder:0", "None or s == -1 else s \\ for s", "tfgraph, name): error_message = \"Unable to determine the shape of", "type: Placeholder inputs: [] # node name: make_input_0 op type:", "graph_stack = simple_topsort(dep) return graph_stack @staticmethod def _get_tensor_name(tensor): ret =", "rights reserved. 
# # Use of this source code is", "type(inputs) ) ) if not all([isinstance(i, InputType) for i in", "raise ValueError(\"At least one TF function must be present\") if", "Example: # # TF code: # x = tf.placeholder(tf.float32, shape=(1,))", "for i, v_o in enumerate(block.outputs): if v_o.name not in name_counts:", "to figure out shapes changes during iterates) msg = \"TF", "\\ for s in shape] inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) added_inputs[inp] =", "self.func_input_stack.append(inputs) def unstack_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError(\"No func", "TensorType, ImageType, RangeDim, _get_shaping_class, ) from coremltools.converters.mil.input_types import Shape as", "not None: # Check inputs format if not isinstance(inputs, (list,", "# Extract placeholders that users didn't specify. user_input_names = [inp.name", "shape has type list[int] added_inputs = {} for inp in", "These passes are different # from passes applied to tfssa.", "# node name: while/Exit op type: get_tuple inputs: ['while_0'] #", "(shape, dtype) if len(added_inputs) > 0: logging.info( \"Adding Input not", "if v_o.name != out_name and v_o.name not in input_names: logging.info(", "Function from .ssa_passes.tf_passes import tensorflow_passes from coremltools.converters._profile_utils import _profile #", "name ({}).\" \" Warning: Node added to context must have", "if inputs is not None: # Check inputs format if", "self.outputs = [\"Placeholder:0\"] We need to change the block output", "= [] for f in tfssa.functions.values(): all_nodes += list(f.graph.keys()) for", "isinstance(tensor, str): ret = tensor else: ret = tensor.name return", "{ # ...some ops # } -> (%new_a.x.x) # }", "self.graphs = {} # TF loops are represented as functions,", "# Overriding allow us to translate while_loop body twice (which", "\\ \" Please provide its shape during conversion, using \\n\"", "inputs] ) ) # Special case: if there's only 1", "# TranscriptionContext maintains a 
map of tf_node.name --> ssa_var available", "[] if inputs is not None: # Check inputs format", "return self.func_input_stack[-1] def __getitem__(self, tf_name): if tf_name not in self.context:", "TensorFlow model. \"\"\" self.tfssa = tfssa self.global_type = {} self.inputs", "provided\" ) if inp.name not in tf_placeholder_names: raise ValueError( \"Input", "ssa_vars.name: msg = ( \"MIL op's name ({}) does not", "output else: new_output = mb.identity(x=output, name=output_name) new_outputs.append(new_output) block.set_outputs(new_outputs) def convert_main_graph(self,", "tfssa.functions.values(): output_nodes += list(f.outputs) all_nodes = [] for f in", "len(dep[root]) == 0 graph_stack = simple_topsort(dep) return graph_stack @staticmethod def", "main_func.graph # Filter the inputs to only Placeholder names tf_placeholder_names", "# node name: make_input_0 op type: make_tuple inputs: ['Placeholder', #", "# node name: while_0 op type: while inputs: ['make_input_0'] #", "the current TF --> tfssa transcription. class TranscriptionContext: def __init__(self,", "blocks in Program, like # # while_loop(loop_vars=(%a, %b)) # cond_block1(%a.x,", "extract the output information from TensorFlow model. \"\"\" self.tfssa =", "\"\"\" tfssa: TensorFlow IR. inputs: list of TensorType or ImageType,", "If None, the converter will try to extract the output", "y]) # # Resulting nodes (excluding the nodes in while", "len(self.graph_stack) == 0: raise ValueError(\"At least one TF function must", "list or tuple of TensorType or ImageType, got {} instead.\".format(", "Function(func_inputs) as ssa_func: # Get the input Var for name", "%b.x) # # where [%a.x.x] would be unstacked once cond_block2", "\"TF var %s is added again. 
Overriding previous value\" logging.info(msg", "> 0: logging.info( \"Adding Input not specified by users: '{}'\".format(", "# # where [%a.x.x] would be unstacked once cond_block2 is", "# y = tf.placeholder(tf.float32, shape=(1,)) # c = lambda i,", "= \"TF root graph must be named 'main'. Got {}\"", "inp.name not in tf_placeholder_names: raise ValueError( \"Input ({}) provided is", "(%a.x.x,) # (%a.x, %b.x) # # where [%a.x.x] would be", "input: {}.\" \\ \" Please provide its shape during conversion,", "iterates) msg = \"TF var %s is added again. Overriding", "str for single output name. If None, the converter will", "func_x = node.attr[\"body_function\"] func_y = node.attr[\"cond_function\"] if func_x and fname", "last Var would have a different name than the last", "`while/Exit` is an output from get_tuple, # which in our", "from coremltools.converters.mil.mil.types.symbolic import is_symbolic from coremltools.converters.mil.mil.types import is_tensor from coremltools.converters.mil.mil", "{} not found in context {}\" raise KeyError(msg.format(tf_name, self.name)) return", "+ \"_duplicate_\" + str(name_counts[v_o.name]) x = mb.identity(x=v_o, name=new_name) new_outputs[i] =", "in list(block.inputs.values())] with block: new_outputs = [] for output, output_name", "else: new_output = mb.identity(x=output, name=output_name) new_outputs.append(new_output) block.set_outputs(new_outputs) def convert_main_graph(self, prog,", "Program from coremltools.converters.mil.mil import Function from .ssa_passes.tf_passes import tensorflow_passes from", "-> (%bool_var1) # body_block1(%a.x, %b.x) { # %ret_axx = while_loop(loop_vars=(%a.x,))", "else s \\ for s in shape] inp.shape = _get_shaping_class(shape)", "we should # create mb.identity for those cases block =", "name_counts[v_o.name] = 1 else: name_counts[v_o.name] += 1 new_name = v_o.name", "tfgraph[name].attr.get(\"shape\", None) is not None: shape = tfgraph[name].attr[\"shape\"] elif 
tfgraph[name].attr.get(\"_output_shapes\",", "found in context {}\" raise KeyError(msg.format(tf_name, self.name)) return self.context[tf_name] def", "op type: make_tuple inputs: ['Placeholder', # 'Placeholder_1'] # node name:", "tfssa.functions: for node in tfssa.functions[fname].graph.values(): func_x, func_y = None, None", "# Resulting nodes (excluding the nodes in while loop cond", "s in inputtype.shape.shape] node.attr[\"_output_shapes\"] = [shape] # list of length", "['Placeholder', # 'Placeholder_1'] # node name: while_0 op type: while", "of TensorType or ImageType, optional, defaults to None. outputs: list", "-> (shape, mil_type) mapping. shape has type list[int] added_inputs =", "need to change the block output to Placeholder:0 by inserting", "= [] placeholder_names = tf_placeholder_names # name -> (shape, mil_type)", "two outputs are pointing to the same Var, we should", "( InputType, TensorType, ImageType, RangeDim, _get_shaping_class, ) from coremltools.converters.mil.input_types import", "code: # x = tf.placeholder(tf.float32, shape=(1,)) # y = tf.placeholder(tf.float32,", "tuple(inputs) for inputtype in self.inputs: if not isinstance(inputtype.shape, InputShape): continue", "( \"MIL op's name ({}) does not match TensorFlow's node", "inputs = [] placeholder_names = tf_placeholder_names # name -> (shape,", "{ # ...some ops # } -> (%bool_var1) # body_block1(%a.x,", "while inputs: ['make_input_0'] # node name: while/Exit op type: get_tuple", "input_names: logging.info( \"Renaming output var: '{}' -> '{}'\".format(v_o.name, out_name) )", "None: raise ValueError( \"Unable to infer input's name or input", "ssa_var available # to the current TF --> tfssa transcription.", "all_nodes: raise KeyError('Output node name \"{}\" does exist.'.format(n)) def check_placeholder_output(self,", "def convert_main_graph(self, prog, graph): func_inputs = {} for input_type in", "= convert_graph(self.context, graph, self.outputs) ssa_func.set_outputs(outputs) 
prog.add_function(\"main\", ssa_func) # check duplicate", "= tensorflow_passes def _get_placeholder_shape_from_tf_graph(self, tfgraph, name): error_message = \"Unable to", "= mb.identity(x=v_o, name=new_name) new_outputs[i] = x block.set_outputs(new_outputs) # Rename outputs", "res = tf.while_loop(c, b, [x, y]) # # Resulting nodes", "got {} instead.\".format( [type(i) for i in inputs] ) )", "list)) else [outputs] output_nodes = [] for f in tfssa.functions.values():", "def get_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError(\"No func input", "GPU is specified as device. if isinstance(inp, ImageType) and inputs[idx].channel_first", "logging from coremltools.converters.mil.input_types import ( InputType, TensorType, ImageType, RangeDim, _get_shaping_class,", "or at https://opensource.org/licenses/BSD-3-Clause import logging from coremltools.converters.mil.input_types import ( InputType,", "1 else: name_counts[v_o.name] += 1 new_name = v_o.name + \"_duplicate_\"", "in shape] inp.shape = _get_shaping_class(shape) # Extract placeholders that users", "isinstance(inputtype.shape, InputShape): continue if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]):", "({}).\" \" Warning: Node added to context must have the", "x block.set_outputs(new_outputs) # Rename outputs to TF's name. This is", "is not None: shape = tfgraph[name].attr[\"_output_shapes\"][0] if shape is None:", "inputtype.shape.shape]): continue node = graph[inputtype.name] shape = [-1 if is_symbolic(s)", "} -> (%Placeholder) } But self.outputs = [\"Placeholder:0\"] We need", "or ImageType, optional, defaults to None. 
outputs: list of str", "str(name_counts[v_o.name]) x = mb.identity(x=v_o, name=new_name) new_outputs[i] = x block.set_outputs(new_outputs) #", "if tf_name not in self.context: msg = \"TF var {}", "Var (e.g., get_tuple, Identity etc.), and thus the # last", "be list or tuple of TensorType or ImageType, got {}", "get_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError(\"No func input available\")", "\"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])\".format(name, name) if tfgraph[name].attr.get(\"shape\", None) is not", "added_inputs[inp] = (shape, dtype) if len(added_inputs) > 0: logging.info( \"Adding", "outputs): if outputs is None: return outputs = outputs if", "(c) 2020, Apple Inc. All rights reserved. # # Use", "block.set_outputs(new_outputs) def convert_main_graph(self, prog, graph): func_inputs = {} for input_type", "in user_input_names: placeholder_names.append(name) else: inputs = [] placeholder_names = tf_placeholder_names", "= None if isinstance(tensor, str): ret = tensor else: ret", "name passed to context.\" ) raise ValueError(msg.format(tf_name, ssa_vars.name)) self.context[tf_name] =", "self.name = name if name is not None else \"\"", "\"\"\" if tf_name in self.context: # Overriding allow us to", "# check duplicate output # Note: sometimes two outputs are", "for x in outputs] self.outputs = outputs # We would", "again. Overriding previous value\" logging.info(msg % tf_name) if is_new_var and", "TFConverter: def __init__(self, tfssa, inputs=None, outputs=None, **kwargs): \"\"\" tfssa: TensorFlow", "func input available\") return self.func_input_stack[-1] def __getitem__(self, tf_name): if tf_name", "raise ValueError(msg.format(tf_name, ssa_vars.name)) self.context[tf_name] = ssa_vars def add_graph(self, graph_name, graph):", ") ) # Special case: if there's only 1 input", "<filename>coremltools/converters/mil/frontend/tensorflow/converter.py # Copyright (c) 2020, Apple Inc. 
All rights reserved.", "in inputs: # Check inputs existence if inp.name is None:", "During the translation of cond_block2, we'd have func_input_stack # #", "Apple Inc. All rights reserved. # # Use of this", "will try to extract the output information from TensorFlow model.", "accept -1 or None dimension. shape = [get_new_symbol() if s", "raise ValueError( \"Unable to infer input's name or input name", "assert len(dep[root]) == 0 graph_stack = simple_topsort(dep) return graph_stack @staticmethod", "is_symbolic from coremltools.converters.mil.mil.types import is_tensor from coremltools.converters.mil.mil import types from", "if name not in user_input_names: placeholder_names.append(name) else: inputs = []", "a case where the program is like main(%Placeholder: (5,fp32)) {", "inp not in placeholder_names: continue node = graph[inp] dtype =", "if is_new_var and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name: msg", "not match TensorFlow's node name ({}).\" \" Warning: Node added", "ssa_func) # check duplicate output # Note: sometimes two outputs", "if tfgraph[name].attr.get(\"shape\", None) is not None: shape = tfgraph[name].attr[\"shape\"] elif", "'{}'\".format(v_o.name, out_name) ) v_o.name = out_name self.check_placeholder_output(prog, self.outputs) @_profile def", "zip(block.outputs, outputs_name): if output.name not in input_name or output.name ==", "type list[int] added_inputs = {} for inp in main_func.inputs: if", "['while_0'] # node name: while/Exit_1 op type: get_tuple inputs: ['while_0']", "tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) # b = lambda i, j: (tf.add(i, 1),", "inserting an identity \"\"\" block = prog[\"main\"] input_name = [x.name", "self._get_stack(tfssa, root=\"main\") self.context = TranscriptionContext() self.tensorflow_passes = tensorflow_passes def _get_placeholder_shape_from_tf_graph(self,", "name: while/Exit_1 op type: get_tuple inputs: ['while_0'] # # Observe", "infer outputs if not provided self._validate_outputs(tfssa, 
outputs) outputs = main_func.outputs", "didn't specify. user_input_names = [inp.name for inp in inputs] for", "ValueError(\"No func input available\") return self.func_input_stack[-1] def __getitem__(self, tf_name): if", "{x: [] for x in tfssa.functions} for fname in tfssa.functions:", "import tensorflow_passes from coremltools.converters._profile_utils import _profile # TranscriptionContext maintains a", "Extract placeholders that users didn't specify. user_input_names = [inp.name for", "str or str, optional, defaults to None. A list of", "to the current TF --> tfssa transcription. class TranscriptionContext: def", "if outputs is None: return outputs = outputs if isinstance(outputs,", "list of names of the output nodes or a str", "# list of tuple[Var] def add(self, tf_name, ssa_vars, is_new_var=True): \"\"\"", "for single output name. If None, the converter will try", "name=new_name) new_outputs[i] = x block.set_outputs(new_outputs) # Rename outputs to TF's", "1: if inputs[0].name is None: inputs[0].name = tf_placeholder_names[0] # filter", "and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name: msg = (", "outputs is None: return outputs = outputs if isinstance(outputs, (tuple,", "for inp in inputs] for name in tf_placeholder_names: if name", "-> (%new_a.x.x) # } -> (%ret_axx) # ....some ops using", "of how to loop through the graphs. 
# This is", "node.op == \"while\": func_x = node.attr[\"body_function\"] func_y = node.attr[\"cond_function\"] if", "node `while/Exit` is an output from get_tuple, # which in", "else x.name for x in outputs] self.outputs = outputs #", "is None: raise ValueError(error_message) else: raise ValueError(error_message) return shape def", "provided self._validate_outputs(tfssa, outputs) outputs = main_func.outputs if outputs is None", "[x if isinstance(x, str) else x.name for x in outputs]", "import Builder as mb from coremltools.converters.mil.mil import Program from coremltools.converters.mil.mil", "tfssa.functions} for fname in tfssa.functions: for node in tfssa.functions[fname].graph.values(): func_x,", "[x for x in inputs if x.name in tf_placeholder_names] #", "input and 1 placeholder, we match them. if len(tf_placeholder_names) ==", "%b.x) { # %ret_axx = while_loop(loop_vars=(%a.x,)) # cond_block2(%a.x.x) { #", "= [x if isinstance(x, str) else x.name for x in", "in tfssa.functions: for node in tfssa.functions[fname].graph.values(): func_x, func_y = None,", "are pointing to the same Var, we should # create", "if inp.shape is None: shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) # _get_shaping_class", "if isinstance(inp, ImageType) and inputs[idx].channel_first is None: inputs[idx].channel_first = False", "is not None: shape = tfgraph[name].attr[\"shape\"] elif tfgraph[name].attr.get(\"_output_shapes\", None) is", "& body): # # node name: Placeholder op type: Placeholder", "# ('while_0:0', 'while_0:1') returned from while_0 SSA op. 
We need", "None main_func = tfssa.functions[\"main\"] graph = main_func.graph # Filter the", "self.graphs[graph_name] = graph def get_graph(self, graph_name): if graph_name not in", "def add_graph(self, graph_name, graph): self.graphs[graph_name] = graph def get_graph(self, graph_name):", "We need to # rename `while_0:0` to `while/Exit` in order", "to context must have the same name as the name", "mil_type) mapping. shape has type list[int] added_inputs = {} for", "doesn't have shape for inp in inputs: # Check inputs", "= mb.placeholder( input_type.shape.symbolic_shape, dtype=input_type.dtype) prog.set_main_input_types(self.inputs) with Function(func_inputs) as ssa_func: #", "in zip(block.outputs, outputs_name): if output.name not in input_name or output.name", "or str, optional, defaults to None. A list of names", "f in tfssa.functions.values(): all_nodes += list(f.graph.keys()) for n in outputs:", "# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause import", "tuple of Vars # ('while_0:0', 'while_0:1') returned from while_0 SSA", ".basic_graph_ops import topsort, simple_topsort from .convert_utils import convert_graph from coremltools.converters.mil.mil", "conversion sequentially. self.graph_stack = self._get_stack(tfssa, root=\"main\") self.context = TranscriptionContext() self.tensorflow_passes", "tf_placeholder_names: if name not in user_input_names: placeholder_names.append(name) else: inputs =", "-1 else s \\ for s in shape] inp.shape =", "types from .basic_graph_ops import topsort, simple_topsort from .convert_utils import convert_graph", "list(f.graph.keys()) for n in outputs: if self._get_tensor_name(n) not in output_nodes", "self.outputs = outputs # We would like a stack so", "given tensorflow graph. Placeholders in graph are: {}\".format( inp.name, tf_placeholder_names", "....some ops using %ret_a # } -> (%ret_ax, %ret_bx) #", "on Program. 
These passes are different # from passes applied", "isinstance(outputs, (tuple, list)) else [outputs] outputs = [x if isinstance(x,", "those cases block = prog[\"main\"] with block: name_counts = {}", "= node.attr[\"cond_function\"] if func_x and fname not in dep[func_x]: dep[func_x].append(fname)", "block: new_outputs = [] for output, output_name in zip(block.outputs, outputs_name):", "func_inputs[input_type.name] = mb.placeholder( input_type.shape.symbolic_shape, dtype=input_type.dtype) prog.set_main_input_types(self.inputs) with Function(func_inputs) as ssa_func:", "in graph are: {}\".format( inp.name, tf_placeholder_names ) ) if inp.shape", "mb.placeholder( input_type.shape.symbolic_shape, dtype=input_type.dtype) prog.set_main_input_types(self.inputs) with Function(func_inputs) as ssa_func: # Get", "graph. Placeholders in graph are: {}\".format( inp.name, tf_placeholder_names ) )", "during iterates) msg = \"TF var %s is added again.", "to determine the shape of input: {}.\" \\ \" Please", "self.inputs: func_inputs[input_type.name] = mb.placeholder( input_type.shape.symbolic_shape, dtype=input_type.dtype) prog.set_main_input_types(self.inputs) with Function(func_inputs) as", "dep[func_x]: dep[func_x].append(fname) if func_y and fname not in dep[func_y]: dep[func_y].append(fname)", "node name: while/Exit op type: get_tuple inputs: ['while_0'] # node", "necessarily a DAG. 
dep = {x: [] for x in", "= None main_func = tfssa.functions[\"main\"] graph = main_func.graph # Filter", "pointing to the same Var, we should # create mb.identity", "in self.context: msg = \"TF var {} not found in", "which is not in tf_placeholder_names inputs = [x for x", "i, j: (tf.add(i, 1), j) # res = tf.while_loop(c, b,", "name: make_input_0 op type: make_tuple inputs: ['Placeholder', # 'Placeholder_1'] #", "same name as the name passed to context.\" ) raise", "1 and len(inputs) == 1: if inputs[0].name is None: inputs[0].name", "_validate_outputs(self, tfssa, outputs): if outputs is None: return outputs =", "= tensor.name return ret.split(\":\")[0] def _validate_outputs(self, tfssa, outputs): if outputs", "nested # blocks in Program, like # # while_loop(loop_vars=(%a, %b))", "in block.outputs] for i, v_o in enumerate(block.outputs): if v_o.name not", "tensorflow graph. Placeholders in graph are: {}\".format( inp.name, tf_placeholder_names )", "not specified by users: '{}'\".format( added_inputs) ) for idx, inp", "context {}\" raise KeyError(msg.format(tf_name, self.name)) return self.context[tf_name] def __contains__(self, tf_name):", "import get_new_symbol from coremltools.converters.mil.mil.types.symbolic import is_symbolic from coremltools.converters.mil.mil.types import is_tensor", "format in TF as NHWC, since NHWC is used #", "while_0 op type: while inputs: ['make_input_0'] # node name: while/Exit", "for f in tfssa.functions.values(): all_nodes += list(f.graph.keys()) for n in", "length 1 # infer outputs if not provided self._validate_outputs(tfssa, outputs)", "['make_input_0'] # node name: while/Exit op type: get_tuple inputs: ['while_0']", "inp in main_func.inputs: if inp not in placeholder_names: continue node", "raise ValueError(msg.format(self.graph_stack[0])) graph = self.tfssa.functions[\"main\"].graph for g_name in self.graph_stack[1:]: self.context.add_graph(g_name,", "ValueError( \"Type of inputs should be list or tuple of", "in 
placeholder_names: continue node = graph[inp] dtype = node.attr['dtype'] shape", "while/Exit_1 op type: get_tuple inputs: ['while_0'] # # Observe that", "block output to Placeholder:0 by inserting an identity \"\"\" block", "in inputtype.shape.shape] node.attr[\"_output_shapes\"] = [shape] # list of length 1", "Filter the inputs to only Placeholder names tf_placeholder_names = [n", "# needed to figure out shapes changes during iterates) msg", "= [x.name for x in list(block.inputs.values())] with block: new_outputs =", "placeholder_names = [] if inputs is not None: # Check", "self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) self.convert_main_graph(prog, graph) # Apply TF frontend passes on", "should be list or tuple, got {} instead.\".format( type(inputs) )", "Placeholders in graph are: {}\".format( inp.name, tf_placeholder_names ) ) if", "[%a.x.x] would be unstacked once cond_block2 is done. self.func_input_stack =", "= tf_placeholder_names # name -> (shape, mil_type) mapping. shape has", "tfssa, root=\"main\"): # We're trying to get a order of", "= tfssa.functions[\"main\"] graph = main_func.graph # Filter the inputs to", "zip(prog[\"main\"].outputs, self.outputs): if v_o.name != out_name and v_o.name not in", "have func_input_stack # # (%a.x.x,) # (%a.x, %b.x) # #", "for x in inputs if x.name in tf_placeholder_names] # We", "'{}' not found in: {}\" raise KeyError(msg.format(graph_name, list(self.graphs.keys()))) return self.graphs[graph_name]", "the graphs. # This is NOT necessarily a DAG. 
dep", "self.name)) return self.context[tf_name] def __contains__(self, tf_name): return tf_name in self.context", "us to translate while_loop body twice (which is # needed", "graph): func_inputs = {} for input_type in self.inputs: func_inputs[input_type.name] =", "in outputs] self.outputs = outputs # We would like a", "= [inp.name for inp in inputs] for name in tf_placeholder_names:", "or output.name == output_name: new_output = output else: new_output =", "input available\") self.func_input_stack.pop() def get_func_inputs(self): if len(self.func_input_stack) == 0: raise", "of TensorType or ImageType, got {} instead.\".format( [type(i) for i" ]
[ "args.output print 'Limit spider: %d' % args.limit # Grab today's", "'[debug] strip last char from baseurl' # mailto: is causing", "# get the proto, domain, path, file (TODO: for a", "except urllib2.URLError as e: # Not an HTTP-specific error (e.g.", "Usage: pylinkcheck.py -r https://www.example.com # # By default, we can", "print '[-] HTTP ERROR: %d - %s' % (e.code, checkurl)", "% href if re.match('^mailto', href): # skip this one continue", "NOT FOUND: %s' % item ####################################### # Main program #", "is working. # We'll iterate over the various directory paths", "add this URL to deadlink list deadlinks.append(checkurl) else: print '[-]", "'[-] NON-HTTP ERROR: %d - %s' % (e.code, checkurl) else:", "this in the Spider function try: if re.match('^http', href): checkurl", "% args.url print 'Output file format: %s' % args.format print", "print '[-] NON-HTTP ERROR: %d - %s' % (e.code, checkurl)", "example, a check of https://foo.example.com will only check # links", "import argparse import urllib2 import csv from datetime import datetime", "- assuming, for now, other protocols are not desired #", "the file from the path thisurl = urlparse(href) if thisurl.netloc", "79 print ' Link Checker Results\\n' if not deadlinks: print", "-o output.txt, --output=output.txt # limit depth: -l 2, --limit=2 #", "default, we can spider and check all of the links", "'': print '[-] HREF %s is out of scope' %", "an informative summary of the dead links def printReport(deadlinks): #", "HREF %s is out of scope' % thisurl.netloc outofscope =", "required=False, default='txt', help='Output file format ') parser.add_argument('-l','--limit', required=False, default=2, help='Limit", "and build our list of URLs to check spiderURL(baseurl, pathlimit)", "list based on each sub directory found print '[spider] path", "Grab today's date for timestamping output file. 
now = datetime.now()", "href except: print '[-] Unknown error in re.match()' try: #print", "based on each sub directory found print '[spider] path limit", "link checker.') parser.add_argument('-f','--format', required=False, default='txt', help='Output file format ') parser.add_argument('-l','--limit',", "all a href links checkurl = urllib2.urlopen(baseurl).read() soup = BeautifulSoup(checkurl,", "Python-based link checker.') parser.add_argument('-f','--format', required=False, default='txt', help='Output file format ')", "now, other protocols are not desired # - place this", "'html.parser') # Spider the site and build our list of", "outofscope = 0 # Check the URLs for link in", "may want to add a '/' to baseurl if it's", "our list of URLs to check spiderURL(baseurl, pathlimit) deadlinks =", "'/': # print '[debug] strip last char from baseurl' #", "try: #print '[+] checking %s' % checkurl hrefpage = urllib2.urlopen(checkurl)", "link. baseurl = str(args.url) pathlimit = int(args.limit) # Show values", "Link found to # bar.example.com will not be checked. #", "pathlimit # Print an informative summary of the dead links", "link in soup(\"a\"): # Fetch the link but only return", "# print each item in the deadlinks list or CLEAN", "--format=txt,html,xml ############################################################################## import argparse import urllib2 import csv from datetime", "re.match()' try: #print '[+] checking %s' % checkurl hrefpage =", "'Output file: %s' % args.output print 'Limit spider: %d' %", "urllib2.URLError as e: # Not an HTTP-specific error (e.g. connection", "and thisurl.netloc != '': print '[-] HREF %s is out", "- if the href links are relative we need to", "change once the spiderURL function is working. # We'll iterate", "= 1 else: print '[debug] path %s' % thisurl.path outofscope", "if the href is relative. 
# - assuming, for now,", "re.match('^mailto', href): # skip this one continue # Separate the", "to add the baseurl when checking # the link. baseurl", "Grab all a href links checkurl = urllib2.urlopen(baseurl).read() soup =", "print '[-] NOT FOUND: %s' % item ####################################### # Main", "for now, other protocols are not desired # - place", "tstamp = now.strftime(\"%Y%m%d-%H%M\") # Grab all a href links checkurl", "(c) 2016 <NAME> # # A Python-based link checker. #", "% args.limit # Grab today's date for timestamping output file.", "instead. outofscope = 0 # Check the URLs for link", "href links checkurl = urllib2.urlopen(baseurl).read() soup = BeautifulSoup(checkurl, 'html.parser') #", "file (TODO: for a complete solution we # need to", "Results\\n' if not deadlinks: print '[+] CLEAN: No dead links", "outofscope = 0 # Build the full URL if the", "# - assuming, for now, other protocols are not desired", "to check spiderURL(baseurl, pathlimit) deadlinks = [] # This for", "href is relative. # - assuming, for now, other protocols", "parser.add_argument('-l','--limit', required=False, default=2, help='Limit directory depth, example.com/limit/dir/depth/') parser.add_argument('-u','--url', help='Base URL", "0 # Check the URLs for link in soup(\"a\"): #", "from urlparse import urlparse from bs4 import BeautifulSoup ####################################### #", "% item ####################################### # Main program # # Get command", "scope' % thisurl.netloc outofscope = 1 else: print '[debug] path", "print '[debug] strip last char from baseurl' # mailto: is", "date for timestamping output file. 
now = datetime.now() tstamp =", "# Spider the site and build our list of URLs", "if re.match('^http', href): checkurl = href else: checkurl = baseurl", "the proto, domain, path, file (TODO: for a complete solution", "# This for loop will completely change once the spiderURL", "%s' % (e.code, checkurl) except urllib2.URLError as e: # Not", "last char from baseurl' # mailto: is causing an error", "root (domain): this is simply required # generate report file:", "in the Spider function try: if re.match('^http', href): checkurl =", "CLEAN: No dead links found' else: for item in deadlinks:", "ERROR: %d - %s' % (e.code, checkurl) else: print '[+]", "on each sub directory found print '[spider] path limit set", "depth: -l 2, --limit=2 # TODO: report format: --format=txt,html,xml ##############################################################################", "= argparse.ArgumentParser(description='A Python-based link checker.') parser.add_argument('-f','--format', required=False, default='txt', help='Output file", "else: print '[-] HTTP ERROR: %d - %s' % (e.code,", "path, file (TODO: for a complete solution we # need", "(e.code, checkurl) except urllib2.URLError as e: # Not an HTTP-specific", "pylinkcheck.py -r https://www.example.com # # By default, we can spider", "in re.match()' try: #print '[+] checking %s' % checkurl hrefpage", "thisurl = urlparse(href) if thisurl.netloc != baseurl and thisurl.netloc !=", "Python-based link checker. 
# # Usage: pylinkcheck.py -r https://www.example.com #", "command line options parser = argparse.ArgumentParser(description='A Python-based link checker.') parser.add_argument('-f','--format',", "printReport(deadlinks): # print each item in the deadlinks list or", "are unpredicatable we can add a function to 'clean' them", "format: --format=txt,html,xml ############################################################################## import argparse import urllib2 import csv from", "causing an error href = link.get('href') print '[debug] href: %s'", "the full URL if the href is relative. # -", "timestamping output file. now = datetime.now() tstamp = now.strftime(\"%Y%m%d-%H%M\") #", "def spiderURL(baseurl, pathlimit): # build a list based on each", "of URLs to check spiderURL(baseurl, pathlimit) deadlinks = [] #", "%s' % thisurl.path outofscope = 0 # Build the full", "'\\n\\n' print '#' * 79 print ' Link Checker Results\\n'", "protocols are not desired # - place this in the", "from baseurl' # mailto: is causing an error href =", "report file: -o output.txt, --output=output.txt # limit depth: -l 2,", "Spider function try: if re.match('^http', href): checkurl = href else:", "default=2, help='Limit directory depth, example.com/limit/dir/depth/') parser.add_argument('-u','--url', help='Base URL to check',", "spiderURL(baseurl, pathlimit): # build a list based on each sub", "# Show values print 'Base URL: %s' % args.url print", "# Spider the base URL def spiderURL(baseurl, pathlimit): # build", "now.strftime(\"%Y%m%d-%H%M\") # Grab all a href links checkurl = urllib2.urlopen(baseurl).read()", "add the baseurl when checking # the link. baseurl =", "unpredicatable we can add a function to 'clean' them up,", "get all of this) #if baseurl[:-1] == '/': # print", "re.match('^http', href): checkurl = href else: checkurl = baseurl +", "# links with the base URL path of foo.example.com. 
Link", "in the deadlinks list or CLEAN if empty print '\\n\\n'", "program # # Get command line options parser = argparse.ArgumentParser(description='A", "code # hrefs are unpredicatable we can add a function", "<reponame>clayball/pylinkcheck<filename>pylinkcheck.py #!/usr/bin/env python # Copyright (c) 2016 <NAME> # #", "full URL if the href is relative. # - assuming,", "sub directory found print '[spider] path limit set to %d'", "it's not present. # - if the href links are", "checkurl hrefpage = urllib2.urlopen(checkurl) except urllib2.HTTPError as e: if e.code", "else: for item in deadlinks: print '[-] NOT FOUND: %s'", "summary of the dead links def printReport(deadlinks): # print each", "import csv from datetime import datetime import re from urlparse", "but only return the status code # hrefs are unpredicatable", "# Get command line options parser = argparse.ArgumentParser(description='A Python-based link", "out of scope' % thisurl.netloc outofscope = 1 else: print", "parser.parse_args() # Assign program arguments to variables # - we", "dead links found' else: for item in deadlinks: print '[-]", "FOUND: %s' % item ####################################### # Main program # #", "# We'll iterate over the various directory paths instead. 
outofscope", "checkurl # add this URL to deadlink list deadlinks.append(checkurl) else:", "int(args.limit) # Show values print 'Base URL: %s' % args.url", "error in re.match()' try: #print '[+] checking %s' % checkurl", "solution we # need to get all of this) #if", "href): checkurl = href else: checkurl = baseurl + href", "hrefpage = urllib2.urlopen(checkurl) except urllib2.HTTPError as e: if e.code ==", "Unknown error in re.match()' try: #print '[+] checking %s' %", "= urllib2.urlopen(baseurl).read() soup = BeautifulSoup(checkurl, 'html.parser') # Spider the site", "from bs4 import BeautifulSoup ####################################### # Functions # Spider the", "# Functions # Spider the base URL def spiderURL(baseurl, pathlimit):", "'Output file format: %s' % args.format print 'Output file: %s'", "Check the URLs for link in soup(\"a\"): # Fetch the", "urlparse(href) if thisurl.netloc != baseurl and thisurl.netloc != '': print", "checkurl = href else: checkurl = baseurl + href except:", "of this) #if baseurl[:-1] == '/': # print '[debug] strip", "in deadlinks: print '[-] NOT FOUND: %s' % item #######################################", "other protocols are not desired # - place this in", "output.txt, --output=output.txt # limit depth: -l 2, --limit=2 # TODO:", "# need to get all of this) #if baseurl[:-1] ==", "check # links with the base URL path of foo.example.com.", "function try: if re.match('^http', href): checkurl = href else: checkurl", "to get all of this) #if baseurl[:-1] == '/': #", "e.code == 404: print '[-] 404 ERROR: %s' % checkurl", "complete solution we # need to get all of this)", "simply required # generate report file: -o output.txt, --output=output.txt #", "of foo.example.com. 
Link found to # bar.example.com will not be", "else: checkurl = baseurl + href except: print '[-] Unknown", "urllib2.HTTPError as e: if e.code == 404: print '[-] 404", "list of URLs to check spiderURL(baseurl, pathlimit) deadlinks = []", "%s' % args.format print 'Output file: %s' % args.output print", "file from the path thisurl = urlparse(href) if thisurl.netloc !=", "Show values print 'Base URL: %s' % args.url print 'Output", "to deadlink list deadlinks.append(checkurl) else: print '[-] HTTP ERROR: %d", "spider: %d' % args.limit # Grab today's date for timestamping", "help='Output file name', required=False) args = parser.parse_args() # Assign program", "empty print '\\n\\n' print '#' * 79 print ' Link", "the URLs for link in soup(\"a\"): # Fetch the link", "this) #if baseurl[:-1] == '/': # print '[debug] strip last", "of scope' % thisurl.netloc outofscope = 1 else: print '[debug]", "baseurl' # mailto: is causing an error href = link.get('href')", "Link Checker Results\\n' if not deadlinks: print '[+] CLEAN: No", "URL if the href is relative. # - assuming, for", "paths instead. outofscope = 0 # Check the URLs for", "all of this) #if baseurl[:-1] == '/': # print '[debug]", "is causing an error href = link.get('href') print '[debug] href:", "%s' % item ####################################### # Main program # # Get", "parser = argparse.ArgumentParser(description='A Python-based link checker.') parser.add_argument('-f','--format', required=False, default='txt', help='Output", "href = link.get('href') print '[debug] href: %s' % href if", "404 ERROR: %s' % checkurl # add this URL to", "deadlinks list or CLEAN if empty print '\\n\\n' print '#'", "Status %d for %s' % (hrefpage.getcode(), checkurl) printReport(deadlinks) # EOF", "# domain. For example, a check of https://foo.example.com will only", "%s' % checkurl # add this URL to deadlink list", "URL's # domain. 
For example, a check of https://foo.example.com will", "else: print '[+] Status %d for %s' % (hrefpage.getcode(), checkurl)", "# Grab all a href links checkurl = urllib2.urlopen(baseurl).read() soup", "to 'clean' them up, i.e., # get the proto, domain,", "links checkurl = urllib2.urlopen(baseurl).read() soup = BeautifulSoup(checkurl, 'html.parser') # Spider", "'/' to baseurl if it's not present. # - if", "argparse import urllib2 import csv from datetime import datetime import", "############################################################################## import argparse import urllib2 import csv from datetime import", "# # Fancy run-time options # url root (domain): this", "list or CLEAN if empty print '\\n\\n' print '#' *", "href): # skip this one continue # Separate the file", "need to get all of this) #if baseurl[:-1] == '/':", "####################################### # Main program # # Get command line options", "TODO: report format: --format=txt,html,xml ############################################################################## import argparse import urllib2 import", "for timestamping output file. now = datetime.now() tstamp = now.strftime(\"%Y%m%d-%H%M\")", "will only check # links with the base URL path", "to baseurl if it's not present. # - if the", "== '/': # print '[debug] strip last char from baseurl'", "% args.output print 'Limit spider: %d' % args.limit # Grab", "loop will completely change once the spiderURL function is working.", "# Separate the file from the path thisurl = urlparse(href)", "link.get('href') print '[debug] href: %s' % href if re.match('^mailto', href):", "#!/usr/bin/env python # Copyright (c) 2016 <NAME> # # A", "'[spider] path limit set to %d' % pathlimit # Print", "check of https://foo.example.com will only check # links with the", "base URL path of foo.example.com. 
Link found to # bar.example.com", "'#' * 79 print ' Link Checker Results\\n' if not", "print '[-] Unknown error in re.match()' try: #print '[+] checking", "# A Python-based link checker. # # Usage: pylinkcheck.py -r", "directory found print '[spider] path limit set to %d' %", "'Base URL: %s' % args.url print 'Output file format: %s'", "# Grab today's date for timestamping output file. now =", "HTTP ERROR: %d - %s' % (e.code, checkurl) except urllib2.URLError", "of https://foo.example.com will only check # links with the base", "datetime import datetime import re from urlparse import urlparse from", "function is working. # We'll iterate over the various directory", "default='txt', help='Output file format ') parser.add_argument('-l','--limit', required=False, default=2, help='Limit directory", "connection refused) print '[-] NON-HTTP ERROR: %d - %s' %", "each sub directory found print '[spider] path limit set to", "for link in soup(\"a\"): # Fetch the link but only", "found to # bar.example.com will not be checked. 
# #", "variables # - we may want to add a '/'", "deadlinks = [] # This for loop will completely change", "# # Usage: pylinkcheck.py -r https://www.example.com # # By default,", "options parser = argparse.ArgumentParser(description='A Python-based link checker.') parser.add_argument('-f','--format', required=False, default='txt',", "# Usage: pylinkcheck.py -r https://www.example.com # # By default, we", "required=True) parser.add_argument('-o','--output', help='Output file name', required=False) args = parser.parse_args() #", "want to add a '/' to baseurl if it's not", "print '[-] HREF %s is out of scope' % thisurl.netloc", "href: %s' % href if re.match('^mailto', href): # skip this", "% (e.code, checkurl) except urllib2.URLError as e: # Not an", "Main program # # Get command line options parser =", "from datetime import datetime import re from urlparse import urlparse", "ERROR: %d - %s' % (e.code, checkurl) except urllib2.URLError as", "we may want to add a '/' to baseurl if", "checking # the link. baseurl = str(args.url) pathlimit = int(args.limit)", "thisurl.netloc != baseurl and thisurl.netloc != '': print '[-] HREF", "as e: if e.code == 404: print '[-] 404 ERROR:", "check all of the links found at the URL's #", "working. # We'll iterate over the various directory paths instead.", "if it's not present. # - if the href links", "Copyright (c) 2016 <NAME> # # A Python-based link checker.", "generate report file: -o output.txt, --output=output.txt # limit depth: -l", "' Link Checker Results\\n' if not deadlinks: print '[+] CLEAN:", "not present. # - if the href links are relative", "checked. # # Fancy run-time options # url root (domain):", "# By default, we can spider and check all of", "site and build our list of URLs to check spiderURL(baseurl,", "- %s' % (e.code, checkurl) except urllib2.URLError as e: #", "a check of https://foo.example.com will only check # links with", "the links found at the URL's # domain. 
For example,", "deadlinks: print '[-] NOT FOUND: %s' % item ####################################### #", "a complete solution we # need to get all of", "base URL def spiderURL(baseurl, pathlimit): # build a list based", "e: if e.code == 404: print '[-] 404 ERROR: %s'", "print '[+] Status %d for %s' % (hrefpage.getcode(), checkurl) printReport(deadlinks)", "Separate the file from the path thisurl = urlparse(href) if", "required=False, default=2, help='Limit directory depth, example.com/limit/dir/depth/') parser.add_argument('-u','--url', help='Base URL to", "%s is out of scope' % thisurl.netloc outofscope = 1", "%s' % args.output print 'Limit spider: %d' % args.limit #", "Not an HTTP-specific error (e.g. connection refused) print '[-] NON-HTTP", "Checker Results\\n' if not deadlinks: print '[+] CLEAN: No dead", "add a '/' to baseurl if it's not present. #", "iterate over the various directory paths instead. outofscope = 0", "print '[+] CLEAN: No dead links found' else: for item", "links are relative we need to add the baseurl when", "URL to deadlink list deadlinks.append(checkurl) else: print '[-] HTTP ERROR:", "pathlimit): # build a list based on each sub directory", "urllib2 import csv from datetime import datetime import re from", "python # Copyright (c) 2016 <NAME> # # A Python-based", "is simply required # generate report file: -o output.txt, --output=output.txt", "file name', required=False) args = parser.parse_args() # Assign program arguments", "print 'Output file: %s' % args.output print 'Limit spider: %d'", "NON-HTTP ERROR: %d - %s' % (e.code, checkurl) else: print", "2, --limit=2 # TODO: report format: --format=txt,html,xml ############################################################################## import argparse", "'[+] CLEAN: No dead links found' else: for item in", "outofscope = 1 else: print '[debug] path %s' % thisurl.path", "a href links checkurl = urllib2.urlopen(baseurl).read() soup = BeautifulSoup(checkurl, 'html.parser')", "links found' else: 
for item in deadlinks: print '[-] NOT", "to # bar.example.com will not be checked. # # Fancy", "limit depth: -l 2, --limit=2 # TODO: report format: --format=txt,html,xml", "1 else: print '[debug] path %s' % thisurl.path outofscope =", "will completely change once the spiderURL function is working. #", "help='Limit directory depth, example.com/limit/dir/depth/') parser.add_argument('-u','--url', help='Base URL to check', required=True)", "the link. baseurl = str(args.url) pathlimit = int(args.limit) # Show", "= datetime.now() tstamp = now.strftime(\"%Y%m%d-%H%M\") # Grab all a href", "directory depth, example.com/limit/dir/depth/') parser.add_argument('-u','--url', help='Base URL to check', required=True) parser.add_argument('-o','--output',", "We'll iterate over the various directory paths instead. outofscope =", "refused) print '[-] NON-HTTP ERROR: %d - %s' % (e.code,", "the various directory paths instead. outofscope = 0 # Check", "# Print an informative summary of the dead links def", "For example, a check of https://foo.example.com will only check #", "href if re.match('^mailto', href): # skip this one continue #", "+ href except: print '[-] Unknown error in re.match()' try:", "Assign program arguments to variables # - we may want", "except: print '[-] Unknown error in re.match()' try: #print '[+]", "thisurl.path outofscope = 0 # Build the full URL if", "if thisurl.netloc != baseurl and thisurl.netloc != '': print '[-]", "Spider the base URL def spiderURL(baseurl, pathlimit): # build a", "HTTP-specific error (e.g. connection refused) print '[-] NON-HTTP ERROR: %d", "once the spiderURL function is working. 
# We'll iterate over", "Functions # Spider the base URL def spiderURL(baseurl, pathlimit): #", "# - if the href links are relative we need", "baseurl and thisurl.netloc != '': print '[-] HREF %s is", "#if baseurl[:-1] == '/': # print '[debug] strip last char", "!= '': print '[-] HREF %s is out of scope'", "can spider and check all of the links found at", "= str(args.url) pathlimit = int(args.limit) # Show values print 'Base", "this URL to deadlink list deadlinks.append(checkurl) else: print '[-] HTTP", "urllib2.urlopen(baseurl).read() soup = BeautifulSoup(checkurl, 'html.parser') # Spider the site and", "to add a '/' to baseurl if it's not present.", "print each item in the deadlinks list or CLEAN if", "URL path of foo.example.com. Link found to # bar.example.com will", "= urlparse(href) if thisurl.netloc != baseurl and thisurl.netloc != '':", "# Assign program arguments to variables # - we may", "By default, we can spider and check all of the", "re from urlparse import urlparse from bs4 import BeautifulSoup #######################################", "parser.add_argument('-f','--format', required=False, default='txt', help='Output file format ') parser.add_argument('-l','--limit', required=False, default=2,", "found at the URL's # domain. For example, a check", "= href else: checkurl = baseurl + href except: print", "i.e., # get the proto, domain, path, file (TODO: for", "# - we may want to add a '/' to", "https://foo.example.com will only check # links with the base URL", "path limit set to %d' % pathlimit # Print an", "deadlink list deadlinks.append(checkurl) else: print '[-] HTTP ERROR: %d -", "= link.get('href') print '[debug] href: %s' % href if re.match('^mailto',", "deadlinks.append(checkurl) else: print '[-] HTTP ERROR: %d - %s' %", "#print '[+] checking %s' % checkurl hrefpage = urllib2.urlopen(checkurl) except", "build our list of URLs to check spiderURL(baseurl, pathlimit) deadlinks", "the base URL path of foo.example.com. 
Link found to #", "check spiderURL(baseurl, pathlimit) deadlinks = [] # This for loop", "get the proto, domain, path, file (TODO: for a complete", "or CLEAN if empty print '\\n\\n' print '#' * 79", "strip last char from baseurl' # mailto: is causing an", "- %s' % (e.code, checkurl) else: print '[+] Status %d", "args.format print 'Output file: %s' % args.output print 'Limit spider:", "datetime.now() tstamp = now.strftime(\"%Y%m%d-%H%M\") # Grab all a href links", "Spider the site and build our list of URLs to", "one continue # Separate the file from the path thisurl", "= 0 # Build the full URL if the href", "urllib2.urlopen(checkurl) except urllib2.HTTPError as e: if e.code == 404: print", "a list based on each sub directory found print '[spider]", "path thisurl = urlparse(href) if thisurl.netloc != baseurl and thisurl.netloc", "file. now = datetime.now() tstamp = now.strftime(\"%Y%m%d-%H%M\") # Grab all", "checkurl = urllib2.urlopen(baseurl).read() soup = BeautifulSoup(checkurl, 'html.parser') # Spider the", "the baseurl when checking # the link. baseurl = str(args.url)", "% thisurl.path outofscope = 0 # Build the full URL", "bs4 import BeautifulSoup ####################################### # Functions # Spider the base", "for a complete solution we # need to get all", "'[debug] path %s' % thisurl.path outofscope = 0 # Build", "URL: %s' % args.url print 'Output file format: %s' %", "name', required=False) args = parser.parse_args() # Assign program arguments to", "with the base URL path of foo.example.com. Link found to", "args.limit # Grab today's date for timestamping output file. 
now", "baseurl + href except: print '[-] Unknown error in re.match()'", "each item in the deadlinks list or CLEAN if empty", "URLs to check spiderURL(baseurl, pathlimit) deadlinks = [] # This", "item in the deadlinks list or CLEAN if empty print", "example.com/limit/dir/depth/') parser.add_argument('-u','--url', help='Base URL to check', required=True) parser.add_argument('-o','--output', help='Output file", "') parser.add_argument('-l','--limit', required=False, default=2, help='Limit directory depth, example.com/limit/dir/depth/') parser.add_argument('-u','--url', help='Base", "bar.example.com will not be checked. # # Fancy run-time options", "# # By default, we can spider and check all", "'[-] 404 ERROR: %s' % checkurl # add this URL", "the link but only return the status code # hrefs", "path of foo.example.com. Link found to # bar.example.com will not", "and check all of the links found at the URL's", "error (e.g. connection refused) print '[-] NON-HTTP ERROR: %d -", "domain, path, file (TODO: for a complete solution we #", "print 'Output file format: %s' % args.format print 'Output file:", "print '[debug] href: %s' % href if re.match('^mailto', href): #", "# url root (domain): this is simply required # generate", "argparse.ArgumentParser(description='A Python-based link checker.') parser.add_argument('-f','--format', required=False, default='txt', help='Output file format", "parser.add_argument('-u','--url', help='Base URL to check', required=True) parser.add_argument('-o','--output', help='Output file name',", "% checkurl # add this URL to deadlink list deadlinks.append(checkurl)", "required=False) args = parser.parse_args() # Assign program arguments to variables", "* 79 print ' Link Checker Results\\n' if not deadlinks:", "deadlinks: print '[+] CLEAN: No dead links found' else: for", "relative we need to add the baseurl when checking #", "format: %s' % args.format print 'Output file: %s' % args.output", "import BeautifulSoup 
####################################### # Functions # Spider the base URL", "run-time options # url root (domain): this is simply required", "print '#' * 79 print ' Link Checker Results\\n' if", "'[-] HTTP ERROR: %d - %s' % (e.code, checkurl) except", "mailto: is causing an error href = link.get('href') print '[debug]", "= parser.parse_args() # Assign program arguments to variables # -", "the spiderURL function is working. # We'll iterate over the", "= now.strftime(\"%Y%m%d-%H%M\") # Grab all a href links checkurl =", "Print an informative summary of the dead links def printReport(deadlinks):", "relative. # - assuming, for now, other protocols are not", "an HTTP-specific error (e.g. connection refused) print '[-] NON-HTTP ERROR:", "to check', required=True) parser.add_argument('-o','--output', help='Output file name', required=False) args =", "the dead links def printReport(deadlinks): # print each item in", "baseurl when checking # the link. baseurl = str(args.url) pathlimit", "spider and check all of the links found at the", "'Limit spider: %d' % args.limit # Grab today's date for", "status code # hrefs are unpredicatable we can add a", "file format ') parser.add_argument('-l','--limit', required=False, default=2, help='Limit directory depth, example.com/limit/dir/depth/')", "%s' % checkurl hrefpage = urllib2.urlopen(checkurl) except urllib2.HTTPError as e:", "# Fancy run-time options # url root (domain): this is", "only check # links with the base URL path of", "up, i.e., # get the proto, domain, path, file (TODO:", "for loop will completely change once the spiderURL function is", "= baseurl + href except: print '[-] Unknown error in", "Get command line options parser = argparse.ArgumentParser(description='A Python-based link checker.')", "# add this URL to deadlink list deadlinks.append(checkurl) else: print", "-r https://www.example.com # # By default, we can spider and", "add a function to 'clean' them up, i.e., # get", "help='Base URL to check', 
required=True) parser.add_argument('-o','--output', help='Output file name', required=False)", "set to %d' % pathlimit # Print an informative summary", "list deadlinks.append(checkurl) else: print '[-] HTTP ERROR: %d - %s'", "'clean' them up, i.e., # get the proto, domain, path,", "Fancy run-time options # url root (domain): this is simply", "link checker. # # Usage: pylinkcheck.py -r https://www.example.com # #", "print ' Link Checker Results\\n' if not deadlinks: print '[+]", "place this in the Spider function try: if re.match('^http', href):", "if re.match('^mailto', href): # skip this one continue # Separate", "them up, i.e., # get the proto, domain, path, file", "all of the links found at the URL's # domain.", "%d' % pathlimit # Print an informative summary of the", "!= baseurl and thisurl.netloc != '': print '[-] HREF %s", "- place this in the Spider function try: if re.match('^http',", "# Main program # # Get command line options parser", "# TODO: report format: --format=txt,html,xml ############################################################################## import argparse import urllib2", "file: -o output.txt, --output=output.txt # limit depth: -l 2, --limit=2", "import re from urlparse import urlparse from bs4 import BeautifulSoup", "a '/' to baseurl if it's not present. # -", "print 'Base URL: %s' % args.url print 'Output file format:", "present. 
# - if the href links are relative we", "the href links are relative we need to add the", "an error href = link.get('href') print '[debug] href: %s' %", "No dead links found' else: for item in deadlinks: print", "= urllib2.urlopen(checkurl) except urllib2.HTTPError as e: if e.code == 404:", "baseurl = str(args.url) pathlimit = int(args.limit) # Show values print", "we # need to get all of this) #if baseurl[:-1]", "dead links def printReport(deadlinks): # print each item in the", "is out of scope' % thisurl.netloc outofscope = 1 else:", "datetime import re from urlparse import urlparse from bs4 import", "item ####################################### # Main program # # Get command line", "now = datetime.now() tstamp = now.strftime(\"%Y%m%d-%H%M\") # Grab all a", "path %s' % thisurl.path outofscope = 0 # Build the", "if empty print '\\n\\n' print '#' * 79 print '", "args.url print 'Output file format: %s' % args.format print 'Output", "this is simply required # generate report file: -o output.txt,", "found print '[spider] path limit set to %d' % pathlimit", "%s' % args.url print 'Output file format: %s' % args.format", "# the link. baseurl = str(args.url) pathlimit = int(args.limit) #", "= BeautifulSoup(checkurl, 'html.parser') # Spider the site and build our", "hrefs are unpredicatable we can add a function to 'clean'", "try: if re.match('^http', href): checkurl = href else: checkurl =", "baseurl if it's not present. # - if the href", "%s' % (e.code, checkurl) else: print '[+] Status %d for", "URL def spiderURL(baseurl, pathlimit): # build a list based on", "over the various directory paths instead. 
outofscope = 0 #", "from the path thisurl = urlparse(href) if thisurl.netloc != baseurl", "we need to add the baseurl when checking # the", "thisurl.netloc outofscope = 1 else: print '[debug] path %s' %", "error href = link.get('href') print '[debug] href: %s' % href", "'[+] Status %d for %s' % (hrefpage.getcode(), checkurl) printReport(deadlinks) #", "the site and build our list of URLs to check", "%d - %s' % (e.code, checkurl) except urllib2.URLError as e:", "import datetime import re from urlparse import urlparse from bs4", "report format: --format=txt,html,xml ############################################################################## import argparse import urllib2 import csv", "%d' % args.limit # Grab today's date for timestamping output", "informative summary of the dead links def printReport(deadlinks): # print", "# skip this one continue # Separate the file from", "href else: checkurl = baseurl + href except: print '[-]", "import urlparse from bs4 import BeautifulSoup ####################################### # Functions #", "check', required=True) parser.add_argument('-o','--output', help='Output file name', required=False) args = parser.parse_args()", "baseurl[:-1] == '/': # print '[debug] strip last char from", "this one continue # Separate the file from the path", "CLEAN if empty print '\\n\\n' print '#' * 79 print", "is relative. # - assuming, for now, other protocols are", "# bar.example.com will not be checked. # # Fancy run-time", "the Spider function try: if re.match('^http', href): checkurl = href", "as e: # Not an HTTP-specific error (e.g. 
connection refused)", "desired # - place this in the Spider function try:", "limit set to %d' % pathlimit # Print an informative", "of the dead links def printReport(deadlinks): # print each item", "if the href links are relative we need to add", "'[debug] href: %s' % href if re.match('^mailto', href): # skip", "-l 2, --limit=2 # TODO: report format: --format=txt,html,xml ############################################################################## import", "# Not an HTTP-specific error (e.g. connection refused) print '[-]", "checkurl = baseurl + href except: print '[-] Unknown error", "proto, domain, path, file (TODO: for a complete solution we", "links with the base URL path of foo.example.com. Link found", "'[+] checking %s' % checkurl hrefpage = urllib2.urlopen(checkurl) except urllib2.HTTPError", "not deadlinks: print '[+] CLEAN: No dead links found' else:", "char from baseurl' # mailto: is causing an error href", "will not be checked. # # Fancy run-time options #", "% thisurl.netloc outofscope = 1 else: print '[debug] path %s'", "options # url root (domain): this is simply required #", "# - place this in the Spider function try: if", "%d - %s' % (e.code, checkurl) else: print '[+] Status", "print '\\n\\n' print '#' * 79 print ' Link Checker", "print '[-] 404 ERROR: %s' % checkurl # add this", "the status code # hrefs are unpredicatable we can add", "a function to 'clean' them up, i.e., # get the", "'[-] Unknown error in re.match()' try: #print '[+] checking %s'", "spiderURL function is working. 
# We'll iterate over the various", "are relative we need to add the baseurl when checking", "'[-] HREF %s is out of scope' % thisurl.netloc outofscope", "--output=output.txt # limit depth: -l 2, --limit=2 # TODO: report", "else: print '[debug] path %s' % thisurl.path outofscope = 0", "# mailto: is causing an error href = link.get('href') print", "item in deadlinks: print '[-] NOT FOUND: %s' % item", "links def printReport(deadlinks): # print each item in the deadlinks", "URL to check', required=True) parser.add_argument('-o','--output', help='Output file name', required=False) args", "'[-] NOT FOUND: %s' % item ####################################### # Main program", "the href is relative. # - assuming, for now, other", "directory paths instead. outofscope = 0 # Check the URLs", "Build the full URL if the href is relative. #", "format ') parser.add_argument('-l','--limit', required=False, default=2, help='Limit directory depth, example.com/limit/dir/depth/') parser.add_argument('-u','--url',", "checker.') parser.add_argument('-f','--format', required=False, default='txt', help='Output file format ') parser.add_argument('-l','--limit', required=False,", "required # generate report file: -o output.txt, --output=output.txt # limit", "the deadlinks list or CLEAN if empty print '\\n\\n' print", "if not deadlinks: print '[+] CLEAN: No dead links found'", "str(args.url) pathlimit = int(args.limit) # Show values print 'Base URL:", "= 0 # Check the URLs for link in soup(\"a\"):", "URLs for link in soup(\"a\"): # Fetch the link but", "def printReport(deadlinks): # print each item in the deadlinks list", "Fetch the link but only return the status code #", "% (e.code, checkurl) else: print '[+] Status %d for %s'", "BeautifulSoup ####################################### # Functions # Spider the base URL def", "0 # Build the full URL if the href is", "# generate report file: -o output.txt, --output=output.txt # limit depth:", "# build a list based on each sub directory found", 
"pathlimit) deadlinks = [] # This for loop will completely", "skip this one continue # Separate the file from the", "https://www.example.com # # By default, we can spider and check", "to %d' % pathlimit # Print an informative summary of", "# hrefs are unpredicatable we can add a function to", "# limit depth: -l 2, --limit=2 # TODO: report format:", "print '[spider] path limit set to %d' % pathlimit #", "found' else: for item in deadlinks: print '[-] NOT FOUND:", "2016 <NAME> # # A Python-based link checker. # #", "e: # Not an HTTP-specific error (e.g. connection refused) print", "soup(\"a\"): # Fetch the link but only return the status", "(e.g. connection refused) print '[-] NON-HTTP ERROR: %d - %s'", "checker. # # Usage: pylinkcheck.py -r https://www.example.com # # By", "for item in deadlinks: print '[-] NOT FOUND: %s' %", "we can spider and check all of the links found", "This for loop will completely change once the spiderURL function", "line options parser = argparse.ArgumentParser(description='A Python-based link checker.') parser.add_argument('-f','--format', required=False,", "output file. now = datetime.now() tstamp = now.strftime(\"%Y%m%d-%H%M\") # Grab", "[] # This for loop will completely change once the", "# Check the URLs for link in soup(\"a\"): # Fetch", "urlparse import urlparse from bs4 import BeautifulSoup ####################################### # Functions", "various directory paths instead. outofscope = 0 # Check the", "if e.code == 404: print '[-] 404 ERROR: %s' %", "in soup(\"a\"): # Fetch the link but only return the", "# Fetch the link but only return the status code", "function to 'clean' them up, i.e., # get the proto,", "- we may want to add a '/' to baseurl", "foo.example.com. 
Link found to # bar.example.com will not be checked.", "the base URL def spiderURL(baseurl, pathlimit): # build a list", "file: %s' % args.output print 'Limit spider: %d' % args.limit", "print '[debug] path %s' % thisurl.path outofscope = 0 #", "(e.code, checkurl) else: print '[+] Status %d for %s' %", "return the status code # hrefs are unpredicatable we can", "print 'Limit spider: %d' % args.limit # Grab today's date", "--limit=2 # TODO: report format: --format=txt,html,xml ############################################################################## import argparse import", "parser.add_argument('-o','--output', help='Output file name', required=False) args = parser.parse_args() # Assign", "spiderURL(baseurl, pathlimit) deadlinks = [] # This for loop will", "= [] # This for loop will completely change once", "the URL's # domain. For example, a check of https://foo.example.com", "BeautifulSoup(checkurl, 'html.parser') # Spider the site and build our list", "except urllib2.HTTPError as e: if e.code == 404: print '[-]", "== 404: print '[-] 404 ERROR: %s' % checkurl #", "values print 'Base URL: %s' % args.url print 'Output file", "404: print '[-] 404 ERROR: %s' % checkurl # add", "# Build the full URL if the href is relative.", "= int(args.limit) # Show values print 'Base URL: %s' %", "arguments to variables # - we may want to add", "ERROR: %s' % checkurl # add this URL to deadlink", "link but only return the status code # hrefs are", "# Copyright (c) 2016 <NAME> # # A Python-based link", "to variables # - we may want to add a", "completely change once the spiderURL function is working. # We'll", "# # A Python-based link checker. # # Usage: pylinkcheck.py", "href links are relative we need to add the baseurl", "soup = BeautifulSoup(checkurl, 'html.parser') # Spider the site and build", "thisurl.netloc != '': print '[-] HREF %s is out of", "checkurl) else: print '[+] Status %d for %s' % (hrefpage.getcode(),", "A Python-based link checker. 
# # Usage: pylinkcheck.py -r https://www.example.com", "domain. For example, a check of https://foo.example.com will only check", "<NAME> # # A Python-based link checker. # # Usage:", "csv from datetime import datetime import re from urlparse import", "assuming, for now, other protocols are not desired # -", "# # Get command line options parser = argparse.ArgumentParser(description='A Python-based", "today's date for timestamping output file. now = datetime.now() tstamp", "url root (domain): this is simply required # generate report", "links found at the URL's # domain. For example, a", "args = parser.parse_args() # Assign program arguments to variables #", "program arguments to variables # - we may want to", "need to add the baseurl when checking # the link.", "(TODO: for a complete solution we # need to get", "checking %s' % checkurl hrefpage = urllib2.urlopen(checkurl) except urllib2.HTTPError as", "checkurl) except urllib2.URLError as e: # Not an HTTP-specific error", "the path thisurl = urlparse(href) if thisurl.netloc != baseurl and", "file format: %s' % args.format print 'Output file: %s' %", "we can add a function to 'clean' them up, i.e.,", "% args.format print 'Output file: %s' % args.output print 'Limit", "continue # Separate the file from the path thisurl =", "can add a function to 'clean' them up, i.e., #", "build a list based on each sub directory found print", "when checking # the link. baseurl = str(args.url) pathlimit =", "help='Output file format ') parser.add_argument('-l','--limit', required=False, default=2, help='Limit directory depth,", "pathlimit = int(args.limit) # Show values print 'Base URL: %s'", "be checked. 
# # Fancy run-time options # url root", "% pathlimit # Print an informative summary of the dead", "urlparse from bs4 import BeautifulSoup ####################################### # Functions # Spider", "####################################### # Functions # Spider the base URL def spiderURL(baseurl,", "(domain): this is simply required # generate report file: -o", "% checkurl hrefpage = urllib2.urlopen(checkurl) except urllib2.HTTPError as e: if", "import urllib2 import csv from datetime import datetime import re", "not desired # - place this in the Spider function", "depth, example.com/limit/dir/depth/') parser.add_argument('-u','--url', help='Base URL to check', required=True) parser.add_argument('-o','--output', help='Output", "%s' % href if re.match('^mailto', href): # skip this one", "only return the status code # hrefs are unpredicatable we", "not be checked. # # Fancy run-time options # url", "are not desired # - place this in the Spider", "# print '[debug] strip last char from baseurl' # mailto:", "of the links found at the URL's # domain. For", "at the URL's # domain. For example, a check of" ]
[ "logging.debug( \"Continue where previous parsing ended {token_pos}\".format( token_pos=self.token_pos ) )", "\"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionSetActionParser @classmethod def _nestable_class(cls): return", "token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionRemoveActionParser only gets called", "For nodes that can be nested in themselves (recursive) but", "of the next token to be processed Returns: str: value", "no special characters - ATTRIBUTE_NAME: A placeholder that has no", "# noinspection PyProtectedMember return UpdateExpressionRemoveClause(children=[ast]) @classmethod def _is_possible_start(cls, token): \"\"\"REMOVE", "Path and right child Value. \"\"\" @classmethod def _is_possible_start(cls, token):", "= deque() def _parse_target_clause(self, factory_class): \"\"\" Args: factory_class: The factory", "/ | \\ UpdateExpressionValue BinOp Operand / | | |", "reached\") break elif self._parse_by_a_subfactory(): continue else: self.raise_unexpected_token() return self._create_node() @classmethod", "expecting a RemoveAction. 
So we should be aggressive on raising", "UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return UpdateExpressionAddAction(children=self._parse_path_and_value()) class UpdateExpressionDeleteClauseParser(ExpressionParser): def _parse(self): assert", "DOT is in a path expression it is never part", "!= \"REMOVE\": \"\"\"We have to make sure remove is not", "noinspection PyProtectedMember ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast) logging.debug( \"Continue where", "For simplicity docstring will use Operand Node rather than the", "\"\".join( [ self.get_2nd_last_token_value_if_last_was_whitespace(), self.get_last_token_value(), problematic_token_in_near, self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(), ] ) raise", "== 0: logging.debug( \"Didn't encounter a single {nc} in {nepc}.\".format(", "UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction,", "multiple times. 
This Mixin adds re-usability for that type of", "factory( **self._initializer_args() )._parse_with_pos() return node self.raise_unexpected_token() class UpdateExpressionAttributeValueParser(ExpressionParser): def _parse(self):", "next token.\".format( class_name=cls._nestable_class().__name__ ) ) @classmethod @abstractmethod def _nestable_class(cls): return", "@abstractmethod def _nestable_class(cls): \"\"\" Get the class of the Node", "= [Token.PLUS_SIGN, Token.MINUS_SIGN] @classmethod def _is_possible_start(cls, token): return token.type in", "def _parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET)", "NestableExpressionParserMixin): \"\"\" Parser to create update expressions \"\"\" @classmethod def", "for an example. Returns: dict: A dictionary of the initializer", "of an UpdateExpression: For example SET a=3 REMOVE b UpdateExpression", "the example in the docstring this would be UpdateExpression Returns:", "raise InvalidTokenException(problematic_token, near) class NestableBinExpressionParser(ExpressionParser): \"\"\" For nodes that can", "None\"\"\" if self.token_pos > 0: return self.token_list[self.token_pos - 1].type else:", "return UpdateExpressionValueOperatorParser class UpdateExpressionGroupedValueParser(ExpressionParser): \"\"\" A grouped value is an", "a + :val - :val2 UpdateExpressionValue / | \\ UpdateExpressionValue", "elif self.get_next_token_type() == Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else: self.raise_unexpected_token() self.goto_next_significant_token() def is_next_token_start_of_selector(self):", "0: return self.token_list[self.token_pos - 1].type else: return None def get_2nd_last_token_value_if_last_was_whitespace(self):", "make sure remove is not passed\"\"\" return True return False", "the example of an 
UpdateExpression: For example SET a=3 REMOVE", "return token.type == Token.ATTRIBUTE and token.value.upper() == \"SET\" def _parse(self):", "name that is not allowed in an UpdateExpression) - DOT's:", "\"\"\" Args: factory_class: The factory for the target clause e.g.", "try: return self.token_list[self.token_pos] except IndexError: return None def get_next_token_value(self): \"\"\"", "def skip_white_space(self): try: while self.get_next_token_type() == Token.WHITESPACE: self.token_pos += 1", "like: ( a >> + >> :val >> - >>", "assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionDeleteActionsParser( **self._initializer_args() )._parse_with_pos() #", "**kwargs): self.target_clauses = deque() def _parse_target_clause(self, factory_class): \"\"\" Args: factory_class:", "to process a function of an Update Expression \"\"\" #", "NestableExpressionParserMixin.__init__(self) @classmethod def _is_possible_start(cls, token): raise RuntimeError( \"{class_name} cannot be", "UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, ) from moto.dynamodb2.exceptions import", "is taken since it allows to remain the ordering of", "- ATTRIBUTE_NAME: A placeholder that has no special characters except", "UpdateExpressionRemoveClauseParser(ExpressionParser): \"\"\" UpdateExpressionRemoveClause => REMOVE RemoveActions \"\"\" def _parse(self): assert", "REMOVE b ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST representing", "=> a + :val - :val2 UpdateExpressionValue / | \\", "1 < len(function_arguments): self.skip_white_space() self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionFunction(children=function_elements) class UpdateExpressionRemoveClauseParser(ExpressionParser):", "times. 
This Mixin adds re-usability for that type of pattern.", "brackets. Each Operand can be a grouped value by itself.", "nodes in order of encountering. Go through them forward and", "whitespace if there are any 3) Process equal-sign token 4)", "which is root node of resulting abstract syntax tree \"\"\"", "and skip all whitespaces\"\"\" self.token_pos += 1 self.skip_white_space() def raise_unexpected_token(self):", "an UpdateExpressionValue: For example value => a + :val -", "value if the token is of type `token_type` \"\"\" if", "return UpdateExpressionGroupedValue(children=value) @classmethod def _is_possible_start(cls, token): return token.type == Token.OPEN_ROUND_BRACKET", "ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction,", "These are used to decent in a nested structure. When", "is the start of a function. \"\"\" if token.type ==", "the Expression as produced by the factory. \"\"\" if len(self.target_nodes)", "self.target_nodes.popleft(), ] ) while len(self.target_nodes) >= 2: target_node = UpdateExpressionValue(", "UpdateExpressionRemoveActions class UpdateExpressionRemoveActionParser(ExpressionParser): \"\"\" RemoveAction => Path = Value So", "= UpdateExpressionSetActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionSetClause(children=[ast]) class", "Mixin adds re-usability for that type of pattern. 
This approach", "\"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return UpdateExpressionRemoveAction(children=[path])", "self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements = [function_name] function_arguments = self.FUNCTIONS[function_name] for i, func_elem_factory", "PyProtectedMember return UpdateExpressionRemoveClause(children=[ast]) @classmethod def _is_possible_start(cls, token): \"\"\"REMOVE is not", "token): return token.type == Token.OPEN_ROUND_BRACKET class UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS = [Token.PLUS_SIGN,", "factory class {fc}\".format( pos=self.token_pos, fc=factory_class.__class__.__name__ ) ) # noinspection PyProtectedMember", "= \"\".join( [ self.get_2nd_last_token_value_if_last_was_whitespace(), self.get_last_token_value(), problematic_token_in_near, self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(), ] )", "= self._nestable_class()(children=[self.target_clauses.pop()]) while len(self.target_clauses) > 0: target_node = self._nestable_class()( children=[self.target_clauses.pop(),", "children=[ target_node, self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) assert len(self.target_nodes) == 0", "@abstractmethod def _nestable_class(cls): return UpdateExpressionSetActions @classmethod @abstractmethod def _nested_expression_parser_class(cls): \"\"\"Returns", "if there are any \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args()", "PyProtectedMember ast, self.token_pos = factory_class( **self._initializer_args() )._parse_with_pos() self.target_nodes.append(ast) logging.debug( \"Continue", "_nested_expression_parser_class(cls): return UpdateExpressionAddActionParser @classmethod def _nestable_class(cls): return UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta) class", "NestableExpressionParserMixin.__init__(self) 
@classmethod def _nestable_class(cls): return UpdateExpression def _parse_expression_clause(self, factory_class): return", "path 2) skip whitespace if there are any \"\"\" path,", "have positive indexes\" logging.debug(\"We are out of range so end", "_is_possible_start(cls, token): return token.type == Token.ATTRIBUTE_VALUE class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def _parse(self):", "token_pos=token_pos ) ) self.token_pos = token_pos @abstractmethod def _initializer_args(self): \"\"\"", "Args: expression_token_list: token_pos(int): Location where parsing is \"\"\" self.token_list =", "def is_at_end(self): \"\"\"Return boolean indicating whether we are at end", "optional spaces - pass closing bracket \"\"\" self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value =", "a function Args: token(Token): the token to check Returns: bool:", "\"Move token pos {pos} to continue parsing with specific factory", "return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return UpdateExpressionAddAction(children=self._parse_path_and_value()) class UpdateExpressionDeleteClauseParser(ExpressionParser): def _parse(self):", "# noinspection PyProtectedMember ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast) logging.debug( \"Continue", "factory_class): return self._parse_target_clause(factory_class) def _parse_by_a_subfactory(self): for sub_factory in self._sub_factories(): if", "class of the Node that will be created that would", "extending ExpressionParser \"\"\" def _create_node(self): \"\"\" target_clauses has the nodes", "from token_pos for the factory type and also return the", "expression it is never part of an attribute name but", "following actions: - skip opening bracket - skip optional spaces", "must be separated with DOT's. 
Returns: UpdateExpressionPath: \"\"\" self.parse_path_chain() while", "is a possible start for entries processed by `cls` \"\"\"", "ExpressionTokenizer.make_list(expression_str) return cls(token_list).parse() class UpdateExpressionSetClauseParser(ExpressionParser): \"\"\" UpdateExpressionSetClause => SET SetActions", "ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, )", "the token type after the one that is being parsed", "ExpressionTokenizer class NestableExpressionParserMixin(object): \"\"\" For nodes that can be nested", "we create an UpdateExpressionSetAction Node that has 2 children. Left", "return token.type == Token.ATTRIBUTE and token.value.upper() == \"ADD\" class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser):", "parse_path_chain(self): self.process_attribute_identifying_token() self.skip_white_space() while self.is_next_token_start_of_selector(): self.process_selector() self.skip_white_space() def process_attribute_identifying_token(self): if", "\"\"\" A grouped value is an Update Expression value clause", "len(function_arguments): self.skip_white_space() self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionFunction(children=function_elements) class UpdateExpressionRemoveClauseParser(ExpressionParser): \"\"\" UpdateExpressionRemoveClause", "token_pos for the factory type. Returns: moto.dynamodb2.ast_nodes.Node: AST which is", "- Attribute: the name of an attribute as how it", "TargetClause* NestableExpression This pattern comes back multiple times. 
This Mixin", "expression_str): token_list = ExpressionTokenizer.make_list(expression_str) return cls(token_list).parse() class UpdateExpressionSetClauseParser(ExpressionParser): \"\"\" UpdateExpressionSetClause", "\"\"\" Paths are selectors within items to specify a part", "type of pattern. This approach is taken since it allows", "self.is_next_token_start_of_selector(): self.process_selector() self.skip_white_space() def process_attribute_identifying_token(self): if self.get_next_token_type() == Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value()))", "processed. So do the following actions: - skip opening bracket", "= self.get_next_token_value() assert operation_value in self.OPERATION_TOKENS self.goto_next_significant_token() return ExpressionValueOperator(operation_value) class", "the name of an attribute as how it is stored", "optional spaces - read numeric literal - skip optional spaces", "ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, ) from", "else: return \"\" def get_following_token_value(self): \"\"\"Get the token value after", "Token.COMMA: self.goto_next_significant_token() else: break if len(self.target_clauses) == 0: logging.debug( \"Didn't", "an UpdateExpressionPath \"\"\" if token.type == Token.ATTRIBUTE_NAME: return True elif", "start of a function. 
\"\"\" if token.type == Token.ATTRIBUTE: return", "=> Path = Value So we create an UpdateExpressionSetAction Node", "ast, self.token_pos = UpdateExpressionDeleteActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return", "have a name that is not allowed in an UpdateExpression)", "to the factories for its elements FUNCTIONS = { \"if_not_exists\":", "AttributeValue Operand* => UpdateExpressionFunction Operand* => Path Operand* => GroupedValue", "the 2nd following token that was correctly parsed if 1st", "of the Nodes as how the corresponding tokens where in", "moto.dynamodb2.parsing.ast_nodes import ( UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath,", "def _sub_factories(cls): return [ UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser, UpdateExpressionPathParser, UpdateExpressionGroupedValueParser, ] @classmethod", "select an element in ordered datatypes like a list. Whitespaces", "cn=self.__class__.__name__ ) target_node = self._nestable_class()(children=[self.target_clauses.pop()]) while len(self.target_clauses) > 0: target_node", "Grammar Operand* => AttributeValue Operand* => UpdateExpressionFunction Operand* => Path", "Node of an AST representing the Expression as produced by", "whitespaces\"\"\" self.token_pos += 1 self.skip_white_space() def raise_unexpected_token(self): if self.is_at_end(): problematic_token", "can be followed by others. 
Process SetActions one by one", "self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET def process_selector(self): \"\"\" Process the selector is", "func_elem_factory in enumerate(function_arguments): func_elem, self.token_pos = func_elem_factory( **self._initializer_args() )._parse_with_pos() function_elements.append(func_elem)", "factory.is_possible_start(self.get_next_token()): node, self.token_pos = factory( **self._initializer_args() )._parse_with_pos() return node self.raise_unexpected_token()", "UpdateExpressionRemoveClause(children=[ast]) @classmethod def _is_possible_start(cls, token): \"\"\"REMOVE is not a keyword\"\"\"", "remove is not passed\"\"\" return True return False def _parse(self):", "self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() return UpdateExpressionSetAction(children=[path, value]) class UpdateExpressionPathParser(ExpressionParser):", "\"REMOVE\": \"\"\"We have to make sure remove is not passed\"\"\"", "if UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token() ): token, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos()", "): token, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() else: token, self.token_pos", "get_2nd_following_token_value_if_following_was_whitespace(self): \"\"\"Get the 2nd following token that was correctly parsed", "a list. Whitespaces can be between all these elements that", "_parse(self): \"\"\" UpdateExpressionRemoveActionParser only gets called when expecting a RemoveAction.", "Token.ATTRIBUTE and token.value.upper() == \"ADD\" class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\"", ")._parse_with_pos() return UpdateExpressionSetAction(children=[path, value]) class UpdateExpressionPathParser(ExpressionParser): \"\"\" Paths are selectors", "in themselves (recursive) but with an operation. 
Take for example", "1].value except IndexError: return \"\" def get_following_token_type(self): \"\"\"Get the token", "= self.get_next_token_value() self.goto_next_significant_token() return token_value else: self.raise_unexpected_token() def goto_next_significant_token(self): \"\"\"Continue", "sure remove is not passed\"\"\" return True return False def", "Parser to create update expressions \"\"\" @classmethod def _sub_factories(cls): return", "[ UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser, ], \"list_append\": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], } @classmethod def", "class. See ExpressionParser for an example. Returns: dict: A dictionary", "are represented in UpdateExpression's. \"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionPathParser,", "Token.ATTRIBUTE_NAME: return True elif token.type == Token.ATTRIBUTE and token.value.upper() !=", "UpdateExpressionPath(children=self.path_nodes) def parse_path(self): \"\"\" A path is comprised of: -", "UpdateExpressionAttributeValueParser(ExpressionParser): def _parse(self): attr_value = ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE) ) return attr_value", "return self._create_node() @classmethod def make(cls, expression_str): token_list = ExpressionTokenizer.make_list(expression_str) return", "\"\"\" Get the class of the Node that will be", "- 2].value else: return \"\" def get_following_token_value(self): \"\"\"Get the token", "we are at end of the parsing\"\"\" return self.token_pos ==", "else: return \"\" def skip_white_space(self): try: while self.get_next_token_type() == Token.WHITESPACE:", "only called when a selector must be processed. 
So do", "import Token, ExpressionTokenizer class NestableExpressionParserMixin(object): \"\"\" For nodes that can", "self.parse_path_chain() def is_next_token_start_of_patch_chain(self): return self.get_next_token_type() == Token.DOT def process_dot(self): self.path_nodes.append(ExpressionPathDescender())", "clause e.g. UpdateExpressionSetClauseParser Returns: \"\"\" logging.debug( \"Move token pos {pos}", "Token.ATTRIBUTE and token.value.upper() == \"DELETE\" class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\"", "the selector is only called when a selector must be", "was correctly parsed if last one was whitespace or return", "will be created that would be nested. For the example", "\"\"\" UpdateExpressionRemoveActionParser only gets called when expecting a RemoveAction. So", "not raise unexpected token Args: token_type: A token type Returns:", "token): return UpdateExpressionOperandParser.is_possible_start(token) def _operand_factory_class(self): return UpdateExpressionOperandParser def _binop_factory_class(self): return", "= self.FUNCTIONS[function_name] for i, func_elem_factory in enumerate(function_arguments): func_elem, self.token_pos =", "leading # to refer to attributes that have a name", "noinspection PyProtectedMember return UpdateExpressionAddClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type", "an UpdateExpression) - DOT's: These are used to decent in", "attribute as how it is stored which has no special", "> 0: return self.token_list[self.token_pos - 1].value else: return \"\" def", "\"\"\" Get the value of the next token to be", "and self.get_last_token_type() == Token.WHITESPACE: return self.token_list[self.token_pos - 2].value else: return", "was whitespace or return empty string\"\"\" if self.get_following_token_type() == Token.WHITESPACE:", "{nepc}.\".format( nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__, 
) ) self.raise_unexpected_token() return self._create_node() class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser):", "function. \"\"\" if token.type == Token.ATTRIBUTE: return token.value in cls.FUNCTIONS.keys()", "MAP. We will call each descend a patch chain -", "UpdateExpressionAddClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and", "self.OPERATION_TOKENS self.goto_next_significant_token() return ExpressionValueOperator(operation_value) class UpdateExpressionOperandParser(ExpressionParser): \"\"\" Grammar Operand* =>", "return token.type == Token.ATTRIBUTE and token.value.upper() == \"REMOVE\" class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser):", "UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() value, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos()", "UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, ) from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression", "Parser class of the Operands for the Binary operations/actions. Returns:", "problematic_token_in_near = problematic_token = self.get_next_token_value() near = \"\".join( [ self.get_2nd_last_token_value_if_last_was_whitespace(),", "def _is_possible_start(cls, token): \"\"\" Args: token(moto.dynamodb2.tokens.Token): Returns: bool: True if", "True return False def _parse(self): \"\"\" Update Expression is the", "self.token_pos > 0, \"We should always have positive indexes\" logging.debug(\"We", "pass closing bracket \"\"\" self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value = self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value))", ":val2 / | | Operand + :val | a self.target_nodes", "in order. 
Continuing the example of an UpdateExpression: For example", "self.token_list[self.token_pos] except IndexError: return None def get_next_token_value(self): \"\"\" Get the", "def _is_possible_start(cls, token): return any( [ UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token), ] )", "= problematic_token = self.get_next_token_value() near = \"\".join( [ self.get_2nd_last_token_value_if_last_was_whitespace(), self.get_last_token_value(),", "IndexError: return \"\" else: return \"\" def skip_white_space(self): try: while", "the one that is being parsed or None if non", "**self._initializer_args() )._parse_with_pos() return UpdateExpressionSetAction(children=[path, value]) class UpdateExpressionPathParser(ExpressionParser): \"\"\" Paths are", "def _nested_expression_parser_class(cls): \"\"\"Returns the parser for the query part that", "def _parse(self): \"\"\" Start parsing the token_list from token_pos for", ") ) # noinspection PyProtectedMember ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast)", "that will be created that would be nested. For the", "def _nested_expression_parser_class(cls): return UpdateExpressionDeleteActionParser @classmethod def _nestable_class(cls): return UpdateExpressionDeleteActions class", "adds re-usability for that type of pattern. 
This approach is", "Token.WHITESPACE: return self.token_list[self.token_pos - 2].value else: return \"\" def get_following_token_value(self):", "self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space() value, self.token_pos", "chain - SELECTORs: E.g.: [1] These are used to select", "selector_value = self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value)) class UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod def _is_possible_start(cls,", "is_at_start(self): \"\"\"Return boolean indicating whether we are at start of", ") return attr_value @classmethod def _is_possible_start(cls, token): return token.type ==", "def _nestable_class(cls): return UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta) class UpdateExpressionPathValueParser(ExpressionParser): def _parse_path_and_value(self): \"\"\"", "token_list from token_pos for the factory type and also return", "an attribute name but always means to descent into a", "if self.is_at_end(): problematic_token = \"<EOF>\" problematic_token_in_near = \"\" else: problematic_token_in_near", "the next token.\".format( class_name=cls._nestable_class().__name__ ) ) @classmethod @abstractmethod def _nestable_class(cls):", "token.type == Token.ATTRIBUTE and token.value.upper() == \"REMOVE\" class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): \"\"\"", "self.goto_next_significant_token() else: break if len(self.target_clauses) == 0: logging.debug( \"Didn't encounter", "is_next_token_start_of_patch_chain(self): return self.get_next_token_type() == Token.DOT def process_dot(self): self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token() def", "was correctly parsed or return None\"\"\" if self.token_pos > 0:", "aggressive on raising 
invalid Tokens. We can thus do the", "will process nodes in order. Continuing the example of an", "descent into a MAP. We will call each descend a", "str: The value if the token is of type `token_type`", "of an UpdateExpressionPath \"\"\" if token.type == Token.ATTRIBUTE_NAME: return True", "the number cannot be split up with spaces Attributes and", "def _parse(self): function_name = self.get_next_token_value() if function_name not in self.FUNCTIONS.keys():", "=> Operand* - Value If we consider it of structure", "UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser, UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser, ] @classmethod def _is_possible_start(cls, token): pass", "parsing\"\"\" return self.token_pos == len(self.token_list) def is_at_start(self): \"\"\"Return boolean indicating", "self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value = self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value)) class UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod def", "correctly parsed or return empty string\"\"\" if self.token_pos > 0:", "forward and build the tree bottom up. For simplicity docstring", "Returns: bool: True if token is the start of a", "remain the ordering of the Nodes as how the corresponding", "of the next token to be processed Returns: str: Token", "within an Item. DynamoDB does not impose much restrictions on", "to decent in a nested structure. When a DOT is", "string\"\"\" if self.token_pos > 0: return self.token_list[self.token_pos - 1].value else:", "any Returns: [path, value]: A list containing the Path node", "only gets called when expecting an AddAction. 
So we should", "UpdateExpressionSetActionParser @classmethod def _nestable_class(cls): return UpdateExpressionSetActions class UpdateExpressionSetActionParser(ExpressionParser): \"\"\" SetAction", "1 and self.get_last_token_type() == Token.WHITESPACE: return self.token_list[self.token_pos - 2].value else:", "the expression so it can be followed by others. Process", ") def _parse(self): self._parse_target_clause(self._operand_factory_class()) while self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class()) if self._operand_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._operand_factory_class())", "return self.token_list[self.token_pos] except IndexError: return None def get_next_token_value(self): \"\"\" Get", "] ) raise InvalidTokenException(problematic_token, near) class NestableBinExpressionParser(ExpressionParser): \"\"\" For nodes", "factories for its elements FUNCTIONS = { \"if_not_exists\": [ UpdateExpressionPathParser,", "of structure NestableBinExpression => TargetClause* NestableBinExpression => TargetClause* BinOp NestableBinExpression", "called when expecting an AddAction. So we should be aggressive", "calling class. See ExpressionParser for an example. Returns: dict: A", "itself. 
\"\"\" def _parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args()", "token to be processed Returns: str: value or None if", "NestableBinExpressionParser(ExpressionParser): \"\"\" For nodes that can be nested in themselves", "return None def get_2nd_following_token_value_if_following_was_whitespace(self): \"\"\"Get the 2nd following token that", "has no special characters except leading # to refer to", "it is expected to end up at the end of", "UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender,", "return UpdateExpressionSetAction(children=[path, value]) class UpdateExpressionPathParser(ExpressionParser): \"\"\" Paths are selectors within", "4) skip whitespace if there are any Returns: [path, value]:", "token \"\"\" try: return self.token_list[self.token_pos] except IndexError: return None def", "refer to attributes that have a name that is not", "_parse(self): operation_value = self.get_next_token_value() assert operation_value in self.OPERATION_TOKENS self.goto_next_significant_token() return", "return self.token_list[self.token_pos - 1].type else: return None def get_2nd_last_token_value_if_last_was_whitespace(self): \"\"\"Get", "\"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionAddActionParser @classmethod def", "UpdateExpressionSetActionParser only gets called when expecting a SetAction. So we", "the Operands for the Binary operations/actions. Returns: class: \"\"\" @abstractmethod", "moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer class NestableExpressionParserMixin(object): \"\"\" For nodes that", "list. 
Whitespaces can be between all these elements that build", "self.token_pos = func_elem_factory( **self._initializer_args() )._parse_with_pos() function_elements.append(func_elem) if i + 1", "=> TargetClause* NestableExpression This pattern comes back multiple times. This", "special characters - ATTRIBUTE_NAME: A placeholder that has no special", "if self.get_next_token_type() == token_type: token_value = self.get_next_token_value() self.goto_next_significant_token() return token_value", "path expression it is never part of an attribute name", "abstract syntax tree \"\"\" @classmethod def is_possible_start(cls, token): return token", "If we consider it of structure NestableExpression => TargetClause* NestableExpression", "is the position in the tokenlist. \"\"\" return self._parse(), self.token_pos", "that gets the possible binary operation. Returns: class: A class", "parsed or return empty string\"\"\" if self.token_pos > 0: return", "self.parse_path() return UpdateExpressionPath(children=self.path_nodes) def parse_path(self): \"\"\" A path is comprised", "value 4) skip whitespace if there are any Returns: [path,", "break elif self._parse_by_a_subfactory(): continue else: self.raise_unexpected_token() return self._create_node() @classmethod def", "_parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionDeleteActionsParser( **self._initializer_args() )._parse_with_pos()", "len(self.target_clauses) > 0, \"No nodes for {cn}\".format( cn=self.__class__.__name__ ) target_node", "def _nested_expression_parser_class(cls): return UpdateExpressionSetActionParser @classmethod def _nestable_class(cls): return UpdateExpressionSetActions class", "will call each descend a patch chain - SELECTORs: E.g.:", "only gets called when expecting a SetAction. So we should", "def _create_node(self): \"\"\" target_clauses has the nodes in order of", "end up at the end of the expression. 
\"\"\" while", "collections import deque from moto.dynamodb2.parsing.ast_nodes import ( UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions,", "part of an attribute name but always means to descent", "whitespaces between brackets and numbers but the number cannot be", "token @classmethod def _is_possible_start(cls, token): return any( [ UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token),", "== Token.WHITESPACE: try: return self.token_list[self.token_pos + 2].value except IndexError: return", "super(NestableBinExpressionParser, self).__init__(*args, **kwargs) self.target_nodes = deque() def _parse_target_clause(self, factory_class): \"\"\"", "are any 3) Process equal-sign token 4) skip whitespace if", "UpdateExpression: For example SET a=3 REMOVE b UpdateExpression / \\", "for factory in self._sub_factories(): if factory.is_possible_start(self.get_next_token()): node, self.token_pos = factory(", "return UpdateExpressionSetActionParser @classmethod def _nestable_class(cls): return UpdateExpressionSetActions class UpdateExpressionSetActionParser(ExpressionParser): \"\"\"", "is expected to end up at the end of the", "4) skip whitespace if there are any 3) Process value", "**self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionRemoveClause(children=[ast]) @classmethod def _is_possible_start(cls,", "token_pos for the factory type and also return the resulting", "2) skip whitespace if there are any 3) Process a", "class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def _parse(self): if UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token() ): token, self.token_pos", "@classmethod def _is_possible_start(cls, token): raise RuntimeError( \"{class_name} cannot be identified", "names are case sensitive raise InvalidUpdateExpression(function_name) self.goto_next_significant_token() 
self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements =", "UpdateExpressionPath \"\"\" if token.type == Token.ATTRIBUTE_NAME: return True elif token.type", "UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" UpdateExpressionSetActions \"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionActionsParser,", "for example UpdateExpression's grammar: UpdateExpression => UpdateExpressionClause* UpdateExpression => UpdateExpressionClause*", "_nestable_class(cls): return UpdateExpressionDeleteActions class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return", "abstract syntax tree and token_pos is the position in the", "will use Operand Node rather than the specific node This", "def is_possible_start(cls, token): return token is not None and cls._is_possible_start(token)", "ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions,", "return UpdateExpressionSetActions @classmethod @abstractmethod def _nested_expression_parser_class(cls): \"\"\"Returns the parser for", "Maker sure the next token is of type `token_type` if", "in self.OPERATION_TOKENS self.goto_next_significant_token() return ExpressionValueOperator(operation_value) class UpdateExpressionOperandParser(ExpressionParser): \"\"\" Grammar Operand*", "or return empty string\"\"\" if self.token_pos > 1 and self.get_last_token_type()", "Binary operations/actions. Returns: class: \"\"\" @abstractmethod def _binop_factory_class(self): \"\"\" Get", "is surrounded by round brackets. 
Each Operand can be a", "= [function_name] function_arguments = self.FUNCTIONS[function_name] for i, func_elem_factory in enumerate(function_arguments):", "< len(function_arguments): self.skip_white_space() self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionFunction(children=function_elements) class UpdateExpressionRemoveClauseParser(ExpressionParser): \"\"\"", "syntax tree and token_pos is the position in the tokenlist.", "_parse(self): return self.process_path() def process_path(self): self.parse_path() return UpdateExpressionPath(children=self.path_nodes) def parse_path(self):", "def __init__(self, *args, **kwargs): super(UpdateExpressionPathParser, self).__init__(*args, **kwargs) self.path_nodes = []", "selectors within items to specify a part within an Item.", "UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN] @classmethod def _is_possible_start(cls, token): return", "return empty string\"\"\" if self.token_pos > 0: return self.token_list[self.token_pos -", "REMOVE RemoveActions \"\"\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos", "no special characters except leading # to refer to attributes", "that is being parsed or empty string if non existent.\"\"\"", "self.token_pos = UpdateExpressionDeleteActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionDeleteClause(children=[ast])", "a single {nc} in {nepc}.\".format( nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__, ) ) self.raise_unexpected_token()", "\"\"\" @classmethod def is_possible_start(cls, token): return token is not None", "\"Continue where previous parsing ended {token_pos}\".format( token_pos=token_pos ) ) self.token_pos", "\"ADD\" class 
UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return", "data it stores but it does store more strict restrictions", "UpdateExpressionGroupedValueParser(ExpressionParser): \"\"\" A grouped value is an Update Expression value", "self.token_pos == len(self.token_list) def is_at_start(self): \"\"\"Return boolean indicating whether we", "last token that was correctly parsed or return empty string\"\"\"", "UpdateExpressionSetActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionSetClause(children=[ast]) class UpdateExpressionActionsParser(ExpressionParser,", "path 2) skip whitespace if there are any 3) Process", "traversal will process nodes in order. Continuing the example of", "is_next_token_start_of_selector(self): return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET def process_selector(self): \"\"\" Process the", "Process equal-sign token 4) skip whitespace if there are any", "def _is_possible_start(cls, token): return token.type in cls.OPERATION_TOKENS def _parse(self): operation_value", "parsed if last one was whitespace or return empty string\"\"\"", "__init__(self, *args, **kwargs): super(NestableBinExpressionParser, self).__init__(*args, **kwargs) self.target_nodes = deque() def", "parsing is \"\"\" self.token_list = expression_token_list self.token_pos = token_pos def", "processed Returns: str: Token type or None if no more", "next token is of type `token_type` if not raise unexpected", "Take for example UpdateExpressionValue's grammar: Value => Operand* Value =>", "_parse(self): \"\"\" Update Expression is the top-most node therefore it", "return None def get_next_token_value(self): \"\"\" Get the value of the", "except leading # to refer to attributes that have a", "an example. 
Returns: dict: A dictionary of the initializer arguments", "def goto_next_significant_token(self): \"\"\"Continue past current token and skip all whitespaces\"\"\"", "is reached\") def process_token_of_type(self, token_type): \"\"\" Maker sure the next", "self._parse_expression_clause(sub_factory) return True return False def _parse(self): \"\"\" Update Expression", "**self._initializer_args() )._parse_with_pos() self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space() value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args()", "class NestableExpressionParserMixin(object): \"\"\" For nodes that can be nested in", "as how it is stored which has no special characters", "for that type of pattern. This approach is taken since", "True elif token.type == Token.ATTRIBUTE and token.value.upper() != \"REMOVE\": \"\"\"We", "assert operation_value in self.OPERATION_TOKENS self.goto_next_significant_token() return ExpressionValueOperator(operation_value) class UpdateExpressionOperandParser(ExpressionParser): \"\"\"", "ast, self.token_pos = UpdateExpressionAddActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return", "correctly parsed if last one was whitespace or return empty", "cls._sub_factories()) def _parse(self): for factory in self._sub_factories(): if factory.is_possible_start(self.get_next_token()): node,", "are used to select an element in ordered datatypes like", "len(self.target_clauses) > 0: target_node = self._nestable_class()( children=[self.target_clauses.pop(), target_node] ) return", "> 0: return self.token_list[self.token_pos - 1].type else: return None def", ") assert len(self.target_nodes) == 0 return target_node class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin):", "out of range so end is reached\") def process_token_of_type(self, token_type):", "= UpdateExpressionValue( children=[ self.target_nodes.popleft(), 
self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) while len(self.target_nodes)", "value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() return UpdateExpressionSetAction(children=[path, value]) class", "= UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionGroupedValue(children=value) @classmethod def _is_possible_start(cls,", "expression so it can be followed by others. Process SetActions", "the factory type and also return the resulting token_pos. Returns:", "= [] @classmethod def _is_possible_start(cls, token): \"\"\" Args: token(Token): the", "parsed or None if non existent.\"\"\" try: return self.token_list[self.token_pos +", "self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space() value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() return", "of type `token_type` if not raise unexpected token Args: token_type:", "{token_pos}\".format( token_pos=self.token_pos ) ) def _parse(self): self._parse_target_clause(self._operand_factory_class()) while self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class())", "UpdateExpressionOperandParser], } @classmethod def _is_possible_start(cls, token): \"\"\" Check whether a", "bool: True if token is the start of a function.", "logging.debug( \"Continue where previous parsing ended {token_pos}\".format( token_pos=token_pos ) )", "of the Node that will be created that would be", "with DOT's. 
Returns: UpdateExpressionPath: \"\"\" self.parse_path_chain() while self.is_next_token_start_of_patch_chain(): self.process_dot() self.parse_path_chain()", "A helper to process a function of an Update Expression", "def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionRemoveActionParser only", "UpdateExpressionFunction Operand* => Path Operand* => GroupedValue \"\"\" @classmethod def", "Token.WHITESPACE: try: return self.token_list[self.token_pos + 2].value except IndexError: return \"\"", "within items to specify a part within an Item. DynamoDB", "_nested_expression_parser_class(cls): return UpdateExpressionRemoveActionParser @classmethod def _nestable_class(cls): return UpdateExpressionRemoveActions class UpdateExpressionRemoveActionParser(ExpressionParser):", "would be nested. For the example in the docstring this", "import abstractmethod import abc import six from collections import deque", "if factory.is_possible_start(self.get_next_token()): node, self.token_pos = factory( **self._initializer_args() )._parse_with_pos() return node", "UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser, ], \"list_append\": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], } @classmethod def _is_possible_start(cls,", "self.get_2nd_last_token_value_if_last_was_whitespace(), self.get_last_token_value(), problematic_token_in_near, self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(), ] ) raise InvalidTokenException(problematic_token, near)", "token_list = ExpressionTokenizer.make_list(expression_str) return cls(token_list).parse() class UpdateExpressionSetClauseParser(ExpressionParser): \"\"\" UpdateExpressionSetClause =>", "correctly parsed or return None\"\"\" if self.token_pos > 0: return", "dict: A dictionary of the initializer arguments \"\"\" @classmethod @abstractmethod", "ended {token_pos}\".format( 
token_pos=token_pos ) ) self.token_pos = token_pos @abstractmethod def", "the class of the Node that will be created that", "comprised of: - Attribute: the name of an attribute as", "with spaces Attributes and attribute_names must be separated with DOT's.", "as how the corresponding tokens where in the originating expression.", "while self.is_next_token_start_of_patch_chain(): self.process_dot() self.parse_path_chain() def is_next_token_start_of_patch_chain(self): return self.get_next_token_type() == Token.DOT", "there are any 3) Process equal-sign token 4) skip whitespace", "\"\"\" @classmethod def _sub_factories(cls): return [ UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser, UpdateExpressionPathParser, UpdateExpressionGroupedValueParser,", "stored which has no special characters - ATTRIBUTE_NAME: A placeholder", "A list containing the Path node and the AttributeValue nodes", "| | Operand + :val | a self.target_nodes looks like:", "== Token.OPEN_ROUND_BRACKET class UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN] @classmethod def", "Map function to the factories for its elements FUNCTIONS =", "except IndexError: return None def get_2nd_following_token_value_if_following_was_whitespace(self): \"\"\"Get the 2nd following", "next token \"\"\" try: return self.get_next_token().type except AttributeError: return None", "while len(self.target_clauses) > 0: target_node = self._nestable_class()( children=[self.target_clauses.pop(), target_node] )", "indicating whether we are at end of the parsing\"\"\" return", "When a DOT is in a path expression it is", "not in self.FUNCTIONS.keys(): # Function names are case sensitive raise", "more SetAction. 
\"\"\" self.skip_white_space() while self._nested_expression_parser_class().is_possible_start( self.get_next_token() ): self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space()", "UpdateExpressionPathValueParser(ExpressionParser): def _parse_path_and_value(self): \"\"\" UpdateExpressionAddActionParser only gets called when expecting", "self.token_list[self.token_pos + 2].value except IndexError: return \"\" else: return \"\"", "looks like: ( a >> + >> :val >> -", "is root node of resulting abstract syntax tree and token_pos", "use Operand Node rather than the specific node This way", "\"\"\" A path is comprised of: - Attribute: the name", "be processed Returns: str: value or None if no more", "try: return self.token_list[self.token_pos + 1].type except IndexError: return None def", "there are any 3) Process a value 4) skip whitespace", "+ :val - :val2 UpdateExpressionValue / | \\ UpdateExpressionValue BinOp", "children. Left child Path and right child Value. \"\"\" @classmethod", "a function. \"\"\" if token.type == Token.ATTRIBUTE: return token.value in", "\"\"\" def __init__(self, *args, **kwargs): super(NestableBinExpressionParser, self).__init__(*args, **kwargs) self.target_nodes =", "through them backwards and build the tree bottom up. This", "str: Token type or None if no more next token", "type Returns: str: The value if the token is of", "UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionDeleteActionParser @classmethod", "parsing the token_list from token_pos for the factory type. Returns:", "len(self.target_clauses) == 0: logging.debug( \"Didn't encounter a single {nc} in", "def __init__(self, *args, **kwargs): super(UpdateExpressionParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def", "build the tree bottom up. 
For simplicity docstring will use", "build a path. For SELECTORs it is also allowed to", "\"list_append\": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], } @classmethod def _is_possible_start(cls, token): \"\"\" Check", "@abstractmethod def _parse(self): \"\"\" Start parsing the token_list from token_pos", "def get_2nd_last_token_value_if_last_was_whitespace(self): \"\"\"Get the 2nd last token that was correctly", "path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space() value,", "child Path and right child Value. \"\"\" @classmethod def _is_possible_start(cls,", "type that was correctly parsed or return None\"\"\" if self.token_pos", "an Update Expression value clause that is surrounded by round", "_sub_factories(cls): return [ UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser, UpdateExpressionPathParser, UpdateExpressionGroupedValueParser, ] @classmethod def", "arguments \"\"\" @classmethod @abstractmethod def _nestable_class(cls): \"\"\" Get the class", "A grouped value is an Update Expression value clause that", "return any( [ UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token), ] ) class UpdateExpressionFunctionParser(ExpressionParser): \"\"\"", "=> Operand* Value => Operand* + Value Value => Operand*", "always means to descent into a MAP. 
We will call", "= UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() value, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args()", "Returns: bool: Whether the token could be the start of", "continue else: self.raise_unexpected_token() return self._create_node() @classmethod def make(cls, expression_str): token_list", "return [ UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser, UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser, ] @classmethod def _is_possible_start(cls,", "__init__(self, *args, **kwargs): super(UpdateExpressionParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _nestable_class(cls):", "Returns: (ast, token_pos): tuple of AST which is root node", "**self._initializer_args() )._parse_with_pos() function_elements.append(func_elem) if i + 1 < len(function_arguments): self.skip_white_space()", "by the factory. \"\"\" if len(self.target_nodes) == 1: return UpdateExpressionValue(children=[self.target_nodes.popleft()])", "@classmethod @abstractmethod def _nested_expression_parser_class(cls): \"\"\"Returns the parser for the query", "being parsed or empty string if non existent.\"\"\" try: return", "UpdateExpressionDeleteClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionDeleteActionsParser(", ")._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionRemoveClause(children=[ast]) @classmethod def _is_possible_start(cls, token):", "Node that will be created that would be nested. For", "Nodes that will be created. 
\"\"\" def _create_node(self): \"\"\" target_clauses", "self.parse_path_chain() while self.is_next_token_start_of_patch_chain(): self.process_dot() self.parse_path_chain() def is_next_token_start_of_patch_chain(self): return self.get_next_token_type() ==", "are case sensitive raise InvalidUpdateExpression(function_name) self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements = [function_name]", "class UpdateExpressionSetActionParser(ExpressionParser): \"\"\" SetAction => Path = Value So we", "token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return UpdateExpressionAddAction(children=self._parse_path_and_value()) class UpdateExpressionDeleteClauseParser(ExpressionParser): def", "class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" UpdateExpressionSetActions \"\"\" def __init__(self, *args, **kwargs):", "For example SET a=3 REMOVE b UpdateExpression / \\ SET", "@classmethod def _is_possible_start(cls, token): \"\"\" Args: token(Token): the token to", "UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause,", "def process_path(self): self.parse_path() return UpdateExpressionPath(children=self.path_nodes) def parse_path(self): \"\"\" A path", "- 1].value else: return \"\" def get_last_token_type(self): \"\"\"Get the last", "token 4) skip whitespace if there are any 3) Process", "token to be processed Returns: str: Token type or None", "self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements = [function_name] function_arguments = self.FUNCTIONS[function_name] for i,", "next token to be processed Returns: str: Token type or", "closing bracket \"\"\" 
self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value = self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value)) class", "return self._parse() def get_next_token_type(self): \"\"\" Get the type of the", "def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionAddActionsParser( **self._initializer_args()", "be nested. For the example in the docstring this would", "== 0 def get_last_token_value(self): \"\"\"Get the last token that was", "the nested nodes\"\"\" def _parse(self): \"\"\" UpdateExpressionSetActions is inside the", "3) Process equal-sign token 4) skip whitespace if there are", "@classmethod def _is_possible_start(cls, token): return any(parser.is_possible_start(token) for parser in cls._sub_factories())", "moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced", "import ( UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue,", "used to select an element in ordered datatypes like a", "self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space() value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() return UpdateExpressionSetAction(children=[path,", "{ \"if_not_exists\": [ UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser, ], \"list_append\": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], }", "\"\"\" # Map function to the factories for its elements", "moto.dynamodb2.ast_nodes.Node: AST which is root node of resulting abstract syntax", "self._parse(), self.token_pos def parse(self): return self._parse() def get_next_token_type(self): \"\"\" Get", "simplicity docstring will use Operand Node 
rather than the specific", "target clause e.g. UpdateExpressionSetClauseParser Returns: \"\"\" logging.debug( \"Move token pos", "= \"\" else: problematic_token_in_near = problematic_token = self.get_next_token_value() near =", "self.token_pos += 1 except IndexError: assert self.token_pos > 0, \"We", "_is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and token.value.upper() == \"SET\"", "{\"expression_token_list\": self.token_list, \"token_pos\": self.token_pos} @abstractmethod def _parse(self): \"\"\" Start parsing", "name but always means to descent into a MAP. We", "Expression is the top-most node therefore it is expected to", "UpdateExpression's. \"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionPathParser, self).__init__(*args, **kwargs) self.path_nodes", "_nestable_class(cls): return UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta) class UpdateExpressionPathValueParser(ExpressionParser): def _parse_path_and_value(self): \"\"\" UpdateExpressionAddActionParser", "an element in ordered datatypes like a list. 
Whitespaces can", "def _nestable_class(cls): return UpdateExpressionDeleteActions class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token):", "it of structure NestableExpression => TargetClause* NestableExpression => TargetClause* NestableExpression", "import InvalidTokenException, InvalidUpdateExpression from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer class NestableExpressionParserMixin(object):", "token): return token.type == Token.ATTRIBUTE and token.value.upper() == \"DELETE\" class", "token(Token): the token to be checked Returns: bool: Whether the", "return UpdateExpressionSetClause(children=[ast]) class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" UpdateExpressionSetActions \"\"\" def __init__(self,", "in a path expression it is never part of an", "also allowed to have whitespaces between brackets and numbers but", "ATTRIBUTE_NAME: A placeholder that has no special characters except leading", "is_possible_start(cls, token): return token is not None and cls._is_possible_start(token) @classmethod", "@classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return UpdateExpressionAddAction(children=self._parse_path_and_value())", "For example value => a + :val - :val2 UpdateExpressionValue", "is never part of an attribute name but always means", "noinspection PyProtectedMember ast, self.token_pos = factory_class( **self._initializer_args() )._parse_with_pos() self.target_nodes.append(ast) logging.debug(", "def _parse_path_and_value(self): \"\"\" UpdateExpressionAddActionParser only gets called when expecting an", "UpdateExpressionAddActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionAddClause(children=[ast]) @classmethod def", "UpdateExpressionSetAction, UpdateExpressionRemoveActions, 
UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute,", "= factory( **self._initializer_args() )._parse_with_pos() return node self.raise_unexpected_token() class UpdateExpressionAttributeValueParser(ExpressionParser): def", "token, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() else: token, self.token_pos =", "Process SetActions one by one until no more SetAction. \"\"\"", "@classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionSetActionParser", "return self.process_path() def process_path(self): self.parse_path() return UpdateExpressionPath(children=self.path_nodes) def parse_path(self): \"\"\"", "return UpdateExpressionOperandParser def _binop_factory_class(self): return UpdateExpressionValueOperatorParser class UpdateExpressionGroupedValueParser(ExpressionParser): \"\"\" A", "for the Binary operations/actions. Returns: class: \"\"\" @abstractmethod def _binop_factory_class(self):", "in order of encountering. Go through them backwards and build", "self.token_pos > 0: return self.token_list[self.token_pos - 1].type else: return None", "Value Value => Operand* - Value If we consider it", "consider it of structure NestableBinExpression => TargetClause* NestableBinExpression => TargetClause*", "also return the resulting token_pos. 
Returns: (ast, token_pos): tuple of", "class UpdateExpressionRemoveClauseParser(ExpressionParser): \"\"\" UpdateExpressionRemoveClause => REMOVE RemoveActions \"\"\" def _parse(self):", "all whitespaces\"\"\" self.token_pos += 1 self.skip_white_space() def raise_unexpected_token(self): if self.is_at_end():", "if self.token_pos > 1 and self.get_last_token_type() == Token.WHITESPACE: return self.token_list[self.token_pos", "return UpdateExpressionRemoveActionParser @classmethod def _nestable_class(cls): return UpdateExpressionRemoveActions class UpdateExpressionRemoveActionParser(ExpressionParser): \"\"\"", "_is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and token.value.upper() == \"DELETE\"", "expressions \"\"\" @classmethod def _sub_factories(cls): return [ UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser, UpdateExpressionDeleteClauseParser,", "resulting abstract syntax tree and token_pos is the position in", "was correctly parsed or return empty string\"\"\" if self.token_pos >", "| Operand + :val | a self.target_nodes looks like: (", "there are any 3) Process value \"\"\" path, self.token_pos =", "== Token.WHITESPACE: self.token_pos += 1 except IndexError: assert self.token_pos >", "Get the value of the next token to be processed", "up. For simplicity docstring will use Operand Node rather than", "is of type `token_type` \"\"\" if self.get_next_token_type() == token_type: token_value", "the position in the tokenlist. 
\"\"\" return self._parse(), self.token_pos def", "return \"\" def skip_white_space(self): try: while self.get_next_token_type() == Token.WHITESPACE: self.token_pos", "UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionAddActionParser @classmethod", "Token.ATTRIBUTE and token.value.upper() == \"SET\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token()", "InvalidTokenException, InvalidUpdateExpression from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer class NestableExpressionParserMixin(object): \"\"\"", "skip optional spaces - read numeric literal - skip optional", "taken since it allows to remain the ordering of the", "str: value or None if no more next token \"\"\"", "return token is not None and cls._is_possible_start(token) @classmethod @abstractmethod def", "sub_factory in self._sub_factories(): if sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory) return True return False", "previous parsing ended {token_pos}\".format( token_pos=token_pos ) ) self.token_pos = token_pos", "Token.WHITESPACE: self.token_pos += 1 except IndexError: assert self.token_pos > 0,", "helper to process a function of an Update Expression \"\"\"", "the token_list from token_pos for the factory type. 
Returns: moto.dynamodb2.ast_nodes.Node:", "example value => a + :val - :val2 UpdateExpressionValue /", "2) skip whitespace if there are any \"\"\" path, self.token_pos", "\"\"\" Update Expression is the top-most node therefore it is", "== Token.ATTRIBUTE: return token.value in cls.FUNCTIONS.keys() else: return False def", "def _parse(self): operation_value = self.get_next_token_value() assert operation_value in self.OPERATION_TOKENS self.goto_next_significant_token()", "the Path node and the AttributeValue nodes \"\"\" path, self.token_pos", "== \"DELETE\" class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls):", "be processed Returns: moto.dynamodb2.tokens.Token: or None if no more next", "ordered datatypes like a list. Whitespaces can be between all", "self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() return token @classmethod def _is_possible_start(cls,", "self.target_clauses = deque() def _parse_target_clause(self, factory_class): \"\"\" Args: factory_class: The", "spaces - read numeric literal - skip optional spaces -", "a value 4) skip whitespace if there are any Returns:", "next token \"\"\" try: return self.get_next_token().value except AttributeError: return None", "Returns: bool: True if token is a possible start for", "representing the Expression as produced by the factory. 
\"\"\" if", "return UpdateExpressionPath(children=self.path_nodes) def parse_path(self): \"\"\" A path is comprised of:", "token): return token.type == Token.ATTRIBUTE and token.value.upper() == \"SET\" def", "that is being parsed or None if non existent.\"\"\" try:", "\"\"\" logging.debug( \"Move token pos {pos} to continue parsing with", ") Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression", "was whitespace or return empty string\"\"\" if self.token_pos > 1", "UpdateExpressionValue( children=[ target_node, self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) assert len(self.target_nodes) ==", "Whitespaces can be between all these elements that build a", "characters except leading # to refer to attributes that have", "def _is_possible_start(cls, token): return UpdateExpressionOperandParser.is_possible_start(token) def _operand_factory_class(self): return UpdateExpressionOperandParser def", "\"\"\"Get the 2nd following token that was correctly parsed if", "moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer class", "@classmethod @abstractmethod def _nestable_class(cls): \"\"\" Get the class of the", "break if len(self.target_clauses) == 0: logging.debug( \"Didn't encounter a single", "[] @classmethod def _is_possible_start(cls, token): \"\"\" Args: token(Token): the token", "of an Update Expression \"\"\" # Map function to the", "self.token_pos = token_pos def _initializer_args(self): return {\"expression_token_list\": self.token_list, \"token_pos\": self.token_pos}", "UpdateExpressionDeleteClause, ) from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression from moto.dynamodb2.parsing.tokens import", "of range so end is reached\") def process_token_of_type(self, token_type): \"\"\"", "next token to be processed Returns: moto.dynamodb2.tokens.Token: or None if", "example in the docstring this would be 
UpdateExpression Returns: class:", "left-deep-descending traversal will process nodes in order. Continuing the example", "Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as", "self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionGroupedValue(children=value)", "boolean indicating whether we are at end of the parsing\"\"\"", "tree \"\"\" @classmethod def is_possible_start(cls, token): return token is not", "possible start for entries processed by `cls` \"\"\" def _parse_with_pos(self):", "right child Value. \"\"\" @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token)", "token): pass def __init__(self, *args, **kwargs): super(UpdateExpressionParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self)", "deque from moto.dynamodb2.parsing.ast_nodes import ( UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions,", "@classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionRemoveActionParser", "\"\"\" Process the selector is only called when a selector", "TargetClause* NestableBinExpression => TargetClause* BinOp NestableBinExpression This pattern comes back", "value => a + :val - :val2 UpdateExpressionValue / |", "def _is_possible_start(cls, token): \"\"\" Args: token(Token): the token to be", "self.token_list[self.token_pos - 2].value else: return \"\" def get_following_token_value(self): \"\"\"Get the", "top-most node therefore it is expected to end up at", "class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def 
_nested_expression_parser_class(cls): return UpdateExpressionRemoveActionParser", "self._sub_factories(): if factory.is_possible_start(self.get_next_token()): node, self.token_pos = factory( **self._initializer_args() )._parse_with_pos() return", "target_clauses has the nodes in order of encountering. Go through", "self.get_next_token() ): token, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() else: token,", "the data it stores but it does store more strict", "not None and cls._is_possible_start(token) @classmethod @abstractmethod def _is_possible_start(cls, token): \"\"\"", "if self.is_at_end(): logging.debug(\"End reached\") break elif self._parse_by_a_subfactory(): continue else: self.raise_unexpected_token()", "= UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space() value, self.token_pos =", "else: self.raise_unexpected_token() return self._create_node() @classmethod def make(cls, expression_str): token_list =", "DOT's: These are used to decent in a nested structure.", "[1] These are used to select an element in ordered", ")._parse_with_pos() self.skip_white_space() return UpdateExpressionRemoveAction(children=[path]) class UpdateExpressionAddClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token())", "clause e.g. 
UpdateExpressionSetClauseParser Returns: \"\"\" # noinspection PyProtectedMember ast, self.token_pos", "skip whitespace if there are any 3) Process a value", "== \"REMOVE\" class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls):", "class UpdateExpressionGroupedValueParser(ExpressionParser): \"\"\" A grouped value is an Update Expression", "=> GroupedValue \"\"\" @classmethod def _sub_factories(cls): return [ UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser,", "b self.target_clauses looks like: ( SET a=3 >> REMOVE b", "} @classmethod def _is_possible_start(cls, token): \"\"\" Check whether a token", "ast, self.token_pos = factory_class( **self._initializer_args() )._parse_with_pos() self.target_nodes.append(ast) logging.debug( \"Continue where", "as produced by the factory. \"\"\" if len(self.target_nodes) == 1:", "None def get_2nd_last_token_value_if_last_was_whitespace(self): \"\"\"Get the 2nd last token that was", "Returns: str: value or None if no more next token", "self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _nestable_class(cls): return UpdateExpression def _parse_expression_clause(self,", "== \"SET\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos =", "sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory) return True return False def _parse(self): \"\"\" Update", "- skip opening bracket - skip optional spaces - read", "from moto.dynamodb2.parsing.ast_nodes import ( UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction,", "_parse_with_pos(self): \"\"\" Start parsing the token_list from token_pos for the", "\"{class_name} cannot be identified by the next 
token.\".format( class_name=cls._nestable_class().__name__ )", "def _parse(self): \"\"\" UpdateExpressionSetActions is inside the expression so it", "factory. \"\"\" if len(self.target_nodes) == 1: return UpdateExpressionValue(children=[self.target_nodes.popleft()]) else: target_node", "=> Operand* + Value Value => Operand* - Value If", "== Token.ATTRIBUTE and token.value.upper() == \"DELETE\" class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions", "For nodes that can be nested in themselves (recursive). Take", "Operand* - Value If we consider it of structure NestableBinExpression", "def _parse_by_a_subfactory(self): for sub_factory in self._sub_factories(): if sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory) return", "def __init__(self, expression_token_list, token_pos=0): \"\"\" Args: expression_token_list: token_pos(int): Location where", "UpdateExpressionDeleteActionParser @classmethod def _nestable_class(cls): return UpdateExpressionDeleteActions class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod def", "self.token_pos def parse(self): return self._parse() def get_next_token_type(self): \"\"\" Get the", "self.target_nodes looks like: ( a >> + >> :val >>", "are any 3) Process value \"\"\" path, self.token_pos = UpdateExpressionPathParser(", "Process a value 4) skip whitespace if there are any", "def __init__(self, *args, **kwargs): super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def", "invalid Tokens. 
We can thus do the following: 1) Process", "\"\" def get_last_token_type(self): \"\"\"Get the last token type that was", "\"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() value, self.token_pos", "self.get_next_token_type() == Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else: self.raise_unexpected_token() self.goto_next_significant_token() def is_next_token_start_of_selector(self): return", "we consider it of structure NestableBinExpression => TargetClause* NestableBinExpression =>", "of an attribute as how it is stored which has", "token to be processed Returns: moto.dynamodb2.tokens.Token: or None if no", "function Args: token(Token): the token to check Returns: bool: True", "not passed\"\"\" return True return False def _parse(self): return self.process_path()", "to attributes that have a name that is not allowed", "problematic_token_in_near, self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(), ] ) raise InvalidTokenException(problematic_token, near) class NestableBinExpressionParser(ExpressionParser):", "UpdateExpressionSetActions class UpdateExpressionSetActionParser(ExpressionParser): \"\"\" SetAction => Path = Value So", "process nodes in order. 
Continuing the example of an UpdateExpressionValue:", "skip_white_space(self): try: while self.get_next_token_type() == Token.WHITESPACE: self.token_pos += 1 except", "previous parsing ended {token_pos}\".format( token_pos=self.token_pos ) ) def _parse(self): self._parse_target_clause(self._operand_factory_class())", "**kwargs): super(UpdateExpressionParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _nestable_class(cls): return UpdateExpression", "= self.get_next_token_value() near = \"\".join( [ self.get_2nd_last_token_value_if_last_was_whitespace(), self.get_last_token_value(), problematic_token_in_near, self.get_following_token_value(),", "there are any Returns: [path, value]: A list containing the", "NestableExpression => TargetClause* NestableExpression This pattern comes back multiple times.", "\"\"\" while True: self.skip_white_space() if self.is_at_end(): logging.debug(\"End reached\") break elif", "token_pos = factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast) logging.debug( \"Continue where previous parsing ended", "UpdateExpressionSetClause => SET SetActions \"\"\" @classmethod def _is_possible_start(cls, token): return", "_nestable_class(cls): \"\"\" Get the class of the Node that will", "equal-sign token 4) skip whitespace if there are any 3)", "= self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value)) class UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod def _is_possible_start(cls, token):", "and also return the resulting token_pos. Returns: (ast, token_pos): tuple", "an AddAction. So we should be aggressive on raising invalid", "one by one until no more SetAction. \"\"\" self.skip_white_space() while", "them backwards and build the tree bottom up. 
This way", ") self.raise_unexpected_token() return self._create_node() class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod", "the initializer arguments \"\"\" @classmethod @abstractmethod def _nestable_class(cls): \"\"\" Get", "the specific node This way left-deep-descending traversal will process nodes", "AST representing the Expression as produced by the factory. \"\"\"", "one was whitespace or return empty string\"\"\" if self.get_following_token_type() ==", "UpdateExpressionDeleteClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and", "self.target_clauses looks like: ( SET a=3 >> REMOVE b )", "parsing ended {token_pos}\".format( token_pos=token_pos ) ) self.token_pos = token_pos @abstractmethod", "Check whether a token is supposed to be a function", "UpdateExpression) - DOT's: These are used to decent in a", "in the originating expression. \"\"\" def __init__(self, *args, **kwargs): super(NestableBinExpressionParser,", "how they are represented in UpdateExpression's. \"\"\" def __init__(self, *args,", "**kwargs): super(UpdateExpressionPathParser, self).__init__(*args, **kwargs) self.path_nodes = [] @classmethod def _is_possible_start(cls,", "type `token_type` \"\"\" if self.get_next_token_type() == token_type: token_value = self.get_next_token_value()", "empty string if non existent.\"\"\" try: return self.token_list[self.token_pos + 1].value", "existent.\"\"\" try: return self.token_list[self.token_pos + 1].value except IndexError: return \"\"", "def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE_VALUE class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def", "0, \"No nodes for {cn}\".format( cn=self.__class__.__name__ ) target_node = self._nestable_class()(children=[self.target_clauses.pop()])", "allows to remain the ordering of the Nodes as how", "the target clause e.g. 
UpdateExpressionSetClauseParser Returns: \"\"\" logging.debug( \"Move token", "def _parse(self): for factory in self._sub_factories(): if factory.is_possible_start(self.get_next_token()): node, self.token_pos", "== Token.ATTRIBUTE and token.value.upper() == \"SET\" def _parse(self): assert self.is_possible_start(self.get_next_token())", "raise_unexpected_token(self): if self.is_at_end(): problematic_token = \"<EOF>\" problematic_token_in_near = \"\" else:", "\\ UpdateExpressionValue BinOp Operand / | | | | UpdateExpressionValue", "Go through them backwards and build the tree bottom up.", "process_dot(self): self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token() def parse_path_chain(self): self.process_attribute_identifying_token() self.skip_white_space() while self.is_next_token_start_of_selector(): self.process_selector()", "self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() value, self.token_pos = UpdateExpressionAttributeValueParser(", "Attribute: the name of an attribute as how it is", "UpdateExpressionPathParser(ExpressionParser): \"\"\" Paths are selectors within items to specify a", "= UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() return token @classmethod def _is_possible_start(cls, token):", "( SET a=3 >> REMOVE b ) Returns: moto.dynamodb2.ast_nodes.Node: Node", "but it does store more strict restrictions on how they", "that would be nested. For the example in the docstring", "if self.get_next_token_type() == Token.COMMA: self.goto_next_significant_token() else: break if len(self.target_clauses) ==", "= self.get_next_token_value() if function_name not in self.FUNCTIONS.keys(): # Function names", "of pattern. 
This approach is taken since it allows to", "== Token.COMMA: self.goto_next_significant_token() else: break if len(self.target_clauses) == 0: logging.debug(", "an attribute as how it is stored which has no", "order of encountering. Go through them forward and build the", "be separated with DOT's. Returns: UpdateExpressionPath: \"\"\" self.parse_path_chain() while self.is_next_token_start_of_patch_chain():", "We can thus do the following: 1) Process path 2)", "enumerate(function_arguments): func_elem, self.token_pos = func_elem_factory( **self._initializer_args() )._parse_with_pos() function_elements.append(func_elem) if i", "Operand* => AttributeValue Operand* => UpdateExpressionFunction Operand* => Path Operand*", "A token type Returns: str: The value if the token", "through them forward and build the tree bottom up. For", "\"REMOVE\" class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return", "token_list from token_pos for the factory type. Returns: moto.dynamodb2.ast_nodes.Node: AST", "class {fc}\".format( pos=self.token_pos, fc=factory_class.__class__.__name__ ) ) # noinspection PyProtectedMember ast,", "else: self.raise_unexpected_token() return self._create_node() @abstractmethod def _operand_factory_class(self): \"\"\" Get the", "class UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionOperandParser.is_possible_start(token) def _operand_factory_class(self):", "for the target clause e.g. 
UpdateExpressionSetClauseParser Returns: \"\"\" logging.debug( \"Move", "+ 1].type except IndexError: return None def get_2nd_following_token_value_if_following_was_whitespace(self): \"\"\"Get the", "where previous parsing ended {token_pos}\".format( token_pos=token_pos ) ) self.token_pos =", "\"\"\" Args: token(moto.dynamodb2.tokens.Token): Returns: bool: True if token is a", "a=3 >> REMOVE b ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an", "but always means to descent into a MAP. We will", "UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, ) from moto.dynamodb2.exceptions import InvalidTokenException,", "are selectors within items to specify a part within an", "\"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod", "_parse(self): \"\"\" UpdateExpressionSetActions is inside the expression so it can", "@classmethod def _nestable_class(cls): return UpdateExpressionDeleteActions class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls,", "is implemented by the calling class. See ExpressionParser for an", "be created that would be nested. For the example in", "a path. 
For SELECTORs it is also allowed to have", "skip whitespace if there are any 3) Process equal-sign token", "the example of an UpdateExpressionValue: For example value => a", "\"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionPathParser, self).__init__(*args, **kwargs) self.path_nodes =", "return any(parser.is_possible_start(token) for parser in cls._sub_factories()) def _parse(self): for factory", "\"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionAddActionParser @classmethod def _nestable_class(cls): return", "Get the next token to be processed Returns: moto.dynamodb2.tokens.Token: or", "UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionSetActionParser @classmethod def _nestable_class(cls):", "self._nestable_class()(children=[self.target_clauses.pop()]) while len(self.target_clauses) > 0: target_node = self._nestable_class()( children=[self.target_clauses.pop(), target_node]", "the Nodes that will be created. \"\"\" def _create_node(self): \"\"\"", "return None def is_at_end(self): \"\"\"Return boolean indicating whether we are", "class UpdateExpressionPathParser(ExpressionParser): \"\"\" Paths are selectors within items to specify", "encountering. Go through them forward and build the tree bottom", "\"\"\" if token.type == Token.ATTRIBUTE_NAME: return True elif token.type ==", "an UpdateExpressionSetAction Node that has 2 children. Left child Path", "operation_value in self.OPERATION_TOKENS self.goto_next_significant_token() return ExpressionValueOperator(operation_value) class UpdateExpressionOperandParser(ExpressionParser): \"\"\" Grammar", "be between all these elements that build a path. 
For", "value is an Update Expression value clause that is surrounded", "len(self.target_nodes) == 0 return target_node class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" Parser", "@abstractmethod def _operand_factory_class(self): \"\"\" Get the Parser class of the", "token): return any(parser.is_possible_start(token) for parser in cls._sub_factories()) def _parse(self): for", "0 def get_last_token_value(self): \"\"\"Get the last token that was correctly", "UpdateExpressionValue BinOp Operand - :val2 / | | Operand +", "from abc import abstractmethod import abc import six from collections", "dictionary of the initializer arguments \"\"\" @classmethod @abstractmethod def _nestable_class(cls):", "on raising invalid Tokens. We can thus do the following:", "= UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() return UpdateExpressionSetAction(children=[path, value]) class UpdateExpressionPathParser(ExpressionParser): \"\"\"", "comes back multiple times. 
This Mixin adds re-usability for that", "self.raise_unexpected_token() return self._create_node() @abstractmethod def _operand_factory_class(self): \"\"\" Get the Parser", "return target_node class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" Parser to create update", "following token that was correctly parsed if 1st one was", "skip all whitespaces\"\"\" self.token_pos += 1 self.skip_white_space() def raise_unexpected_token(self): if", "_nested_expression_parser_class(cls): return UpdateExpressionDeleteActionParser @classmethod def _nestable_class(cls): return UpdateExpressionDeleteActions class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser):", "i + 1 < len(function_arguments): self.skip_white_space() self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionFunction(children=function_elements)", "\"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionDeleteActionParser @classmethod def _nestable_class(cls): return", "Args: factory_class: The factory for the target clause e.g. 
UpdateExpressionSetClauseParser", "self.skip_white_space() value, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return [path,", "last token type that was correctly parsed or return None\"\"\"", "def _operand_factory_class(self): return UpdateExpressionOperandParser def _binop_factory_class(self): return UpdateExpressionValueOperatorParser class UpdateExpressionGroupedValueParser(ExpressionParser):", "token is of type `token_type` \"\"\" if self.get_next_token_type() == token_type:", "deque() def _parse_target_clause(self, factory_class): \"\"\" Args: factory_class: The factory for", "self.target_nodes.popleft(), ] ) assert len(self.target_nodes) == 0 return target_node class", "return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionSetActionParser only gets called when", "if no more next token \"\"\" try: return self.get_next_token().value except", "return target_node @six.add_metaclass(abc.ABCMeta) class ExpressionParser: \"\"\"Abstract class\"\"\" def __init__(self, expression_token_list,", "token): \"\"\" Args: token(moto.dynamodb2.tokens.Token): Returns: bool: True if token is", "def _initializer_args(self): \"\"\" Get the arguments of the initializer. This", "nested. For the example in the docstring this would be", "themselves (recursive). 
Take for example UpdateExpression's grammar: UpdateExpression => UpdateExpressionClause*", "def _initializer_args(self): return {\"expression_token_list\": self.token_list, \"token_pos\": self.token_pos} @abstractmethod def _parse(self):", "Operand* => Path Operand* => GroupedValue \"\"\" @classmethod def _sub_factories(cls):", "# Function names are case sensitive raise InvalidUpdateExpression(function_name) self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET)", "0: logging.debug( \"Didn't encounter a single {nc} in {nepc}.\".format( nc=self._nestable_class().__name__,", "@classmethod def _is_possible_start(cls, token): pass def __init__(self, *args, **kwargs): super(UpdateExpressionParser,", "in self.FUNCTIONS.keys(): # Function names are case sensitive raise InvalidUpdateExpression(function_name)", "possible binary operation. Returns: class: A class extending ExpressionParser \"\"\"", "REMOVE b self.target_clauses looks like: ( SET a=3 >> REMOVE", "== \"ADD\" class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls):", "e.g. UpdateExpressionSetClauseParser Returns: \"\"\" # noinspection PyProtectedMember ast, self.token_pos =", "has 2 children. Left child Path and right child Value.", "2 children. Left child Path and right child Value. 
\"\"\"", "UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause,", "\"\"\" @classmethod @abstractmethod def _nestable_class(cls): \"\"\" Get the class of", "if last one was whitespace or return empty string\"\"\" if", "token to be checked Returns: bool: Whether the token could", "is supposed to be a function Args: token(Token): the token", "= UpdateExpressionRemoveActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionRemoveClause(children=[ast]) @classmethod", "1) Process path 2) skip whitespace if there are any", "arguments of the initializer. This is implemented by the calling", "target clause e.g. UpdateExpressionSetClauseParser Returns: \"\"\" # noinspection PyProtectedMember ast,", "type. Returns: moto.dynamodb2.ast_nodes.Node: AST which is root node of resulting", "self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) assert len(self.target_nodes) == 0 return target_node", "1 except IndexError: assert self.token_pos > 0, \"We should always", "> 0, \"No nodes for {cn}\".format( cn=self.__class__.__name__ ) target_node =", "self.process_path() def process_path(self): self.parse_path() return UpdateExpressionPath(children=self.path_nodes) def parse_path(self): \"\"\" A", "token_pos(int): Location where parsing is \"\"\" self.token_list = expression_token_list self.token_pos", "Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif self.get_next_token_type() == Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else: self.raise_unexpected_token() self.goto_next_significant_token()", "UpdateExpressionGroupedValueParser, ] @classmethod def _is_possible_start(cls, token): return 
any(parser.is_possible_start(token) for parser", "self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class()) if self._operand_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token() return self._create_node() @abstractmethod", "restrictions on how they are represented in UpdateExpression's. \"\"\" def", "token.type == Token.ATTRIBUTE: return token.value in cls.FUNCTIONS.keys() else: return False", "token): return any( [ UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token), ] ) class UpdateExpressionFunctionParser(ExpressionParser):", "self.process_dot() self.parse_path_chain() def is_next_token_start_of_patch_chain(self): return self.get_next_token_type() == Token.DOT def process_dot(self):", "\"\"\" Grammar Operand* => AttributeValue Operand* => UpdateExpressionFunction Operand* =>", "def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionSetActionParser only", "logging.debug( \"Didn't encounter a single {nc} in {nepc}.\".format( nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__,", "it stores but it does store more strict restrictions on", ") from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression from moto.dynamodb2.parsing.tokens import Token,", "when expecting a SetAction. 
So we should be aggressive on", "numeric literal - skip optional spaces - pass closing bracket", "== token_type: token_value = self.get_next_token_value() self.goto_next_significant_token() return token_value else: self.raise_unexpected_token()", "creates the nested nodes\"\"\" def _parse(self): \"\"\" UpdateExpressionSetActions is inside", "import logging from abc import abstractmethod import abc import six", "an Update Expression \"\"\" # Map function to the factories", "process_token_of_type(self, token_type): \"\"\" Maker sure the next token is of", "an Item. DynamoDB does not impose much restrictions on the", "class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionSetActionParser", "initializer arguments \"\"\" @classmethod @abstractmethod def _nestable_class(cls): \"\"\" Get the", "\"\"\" Get the arguments of the initializer. This is implemented", "@classmethod def _nestable_class(cls): return UpdateExpressionRemoveActions class UpdateExpressionRemoveActionParser(ExpressionParser): \"\"\" RemoveAction =>", "rather than the specific node This way left-deep-descending traversal will", "Token.DOT def process_dot(self): self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token() def parse_path_chain(self): self.process_attribute_identifying_token() self.skip_white_space() while", "bracket - skip optional spaces - read numeric literal -", "RemoveActions \"\"\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos =", "So we create an UpdateExpressionSetAction Node that has 2 children.", "\"<EOF>\" problematic_token_in_near = \"\" else: problematic_token_in_near = problematic_token = self.get_next_token_value()", "This approach is taken since it allows to remain the", ">= 2: target_node = UpdateExpressionValue( children=[ target_node, 
self.target_nodes.popleft(), self.target_nodes.popleft(), ]", "\"\"\" try: return self.token_list[self.token_pos] except IndexError: return None def get_next_token_value(self):", "See ExpressionParser for an example. Returns: dict: A dictionary of", "def _parse(self): attr_value = ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE) ) return attr_value @classmethod", "True return False def _parse(self): return self.process_path() def process_path(self): self.parse_path()", "\"\"\" Parser to create update expressions \"\"\" @classmethod def _sub_factories(cls):", "to select an element in ordered datatypes like a list.", "structure NestableExpression => TargetClause* NestableExpression => TargetClause* NestableExpression This pattern", "UpdateExpressionRemoveActionParser(ExpressionParser): \"\"\" RemoveAction => Path = Value So we create", "len(self.target_nodes) >= 2: target_node = UpdateExpressionValue( children=[ target_node, self.target_nodes.popleft(), self.target_nodes.popleft(),", "- read numeric literal - skip optional spaces - pass", "of encountering. Go through them backwards and build the tree", "self).__init__(*args, **kwargs) self.target_nodes = deque() def _parse_target_clause(self, factory_class): \"\"\" Args:", "UpdateExpressionSetClause(children=[ast]) class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" UpdateExpressionSetActions \"\"\" def __init__(self, *args,", "- skip optional spaces - read numeric literal - skip", "| a self.target_nodes looks like: ( a >> + >>", "raising invalid Tokens. We can thus do the following: 1)", ") ) self.raise_unexpected_token() return self._create_node() class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\"", "the end of the expression. 
\"\"\" while True: self.skip_white_space() if", "if function_name not in self.FUNCTIONS.keys(): # Function names are case", "_is_possible_start(cls, token): \"\"\" Args: token(moto.dynamodb2.tokens.Token): Returns: bool: True if token", "tree bottom up. For simplicity docstring will use Operand Node", "and token.value.upper() == \"DELETE\" class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod", "elif self._parse_by_a_subfactory(): continue else: self.raise_unexpected_token() return self._create_node() @classmethod def make(cls,", "function of an Update Expression \"\"\" # Map function to", "the token could be the start of an UpdateExpressionPath \"\"\"", "\"\"\" RemoveAction => Path = Value So we create an", "resulting token_pos. Returns: (ast, token_pos): tuple of AST which is", "one that is being parsed or None if non existent.\"\"\"", "def make(cls, expression_str): token_list = ExpressionTokenizer.make_list(expression_str) return cls(token_list).parse() class UpdateExpressionSetClauseParser(ExpressionParser):", "class UpdateExpressionAttributeValueParser(ExpressionParser): def _parse(self): attr_value = ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE) ) return", "value \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN)", "value]) class UpdateExpressionPathParser(ExpressionParser): \"\"\" Paths are selectors within items to", "Operand Node rather than the specific node This way left-deep-descending", "into a MAP. We will call each descend a patch", "problematic_token = self.get_next_token_value() near = \"\".join( [ self.get_2nd_last_token_value_if_last_was_whitespace(), self.get_last_token_value(), problematic_token_in_near,", "Tokens. 
We can thus do the following: 1) Process path", "Args: token(Token): the token to check Returns: bool: True if", "\"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionRemoveActionParser @classmethod def _nestable_class(cls): return", "root node of resulting abstract syntax tree \"\"\" @classmethod def", "return False def _parse(self): return self.process_path() def process_path(self): self.parse_path() return", "specific factory class {fc}\".format( pos=self.token_pos, fc=factory_class.__class__.__name__ ) ) # noinspection", "or return None\"\"\" if self.token_pos > 0: return self.token_list[self.token_pos -", "brackets and numbers but the number cannot be split up", "token): return token.type in cls.OPERATION_TOKENS def _parse(self): operation_value = self.get_next_token_value()", "whitespace if there are any Returns: [path, value]: A list", "+ :val | a self.target_nodes looks like: ( a >>", "self.target_nodes = deque() def _parse_target_clause(self, factory_class): \"\"\" Args: factory_class: The", "_is_possible_start(cls, token): \"\"\" Check whether a token is supposed to", "on the data it stores but it does store more", "the parsing\"\"\" return self.token_pos == len(self.token_list) def is_at_start(self): \"\"\"Return boolean", "ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE) ) return attr_value @classmethod def _is_possible_start(cls, token): return", "def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionDeleteActionsParser( **self._initializer_args()", "+ 1 < len(function_arguments): self.skip_white_space() self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionFunction(children=function_elements) class", "AttributeError: return None def is_at_end(self): \"\"\"Return boolean indicating whether we", 
"UpdateExpressionSetActionParser(ExpressionParser): \"\"\" SetAction => Path = Value So we create", "self.goto_next_significant_token() def is_next_token_start_of_selector(self): return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET def process_selector(self): \"\"\"", "self.skip_white_space() def process_attribute_identifying_token(self): if self.get_next_token_type() == Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif self.get_next_token_type()", "UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return", "=> TargetClause* NestableExpression => TargetClause* NestableExpression This pattern comes back", "**self._initializer_args() )._parse_with_pos() self.skip_white_space() return [path, value] class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod def", "of an attribute name but always means to descent into", "= UpdateExpressionValue( children=[ target_node, self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) assert len(self.target_nodes)", "def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionSetActionsParser( **self._initializer_args()", "_is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionSetActionParser only gets", "be processed Returns: str: Token type or None if no", "\"\"\" def _create_node(self): \"\"\" target_clauses has the nodes in order", "_binop_factory_class(self): return UpdateExpressionValueOperatorParser class UpdateExpressionGroupedValueParser(ExpressionParser): \"\"\" A grouped value is", "token(moto.dynamodb2.tokens.Token): Returns: bool: True if token is a possible start", "UpdateExpressionAddClauseParser, 
UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser, ] @classmethod def _is_possible_start(cls, token): pass def", "if there are any 3) Process value \"\"\" path, self.token_pos", "token.type == Token.ATTRIBUTE_NAME: return True elif token.type == Token.ATTRIBUTE and", "def _is_possible_start(cls, token): raise RuntimeError( \"{class_name} cannot be identified by", "attr_value = ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE) ) return attr_value @classmethod def _is_possible_start(cls,", "(recursive). Take for example UpdateExpression's grammar: UpdateExpression => UpdateExpressionClause* UpdateExpression", "SET SetActions \"\"\" @classmethod def _is_possible_start(cls, token): return token.type ==", "*args, **kwargs): super(UpdateExpressionPathParser, self).__init__(*args, **kwargs) self.path_nodes = [] @classmethod def", "separated with DOT's. Returns: UpdateExpressionPath: \"\"\" self.parse_path_chain() while self.is_next_token_start_of_patch_chain(): self.process_dot()", "numbers but the number cannot be split up with spaces", "token_pos=self.token_pos ) ) def _parse(self): self._parse_target_clause(self._operand_factory_class()) while self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class()) if", "the target clause e.g. 
UpdateExpressionSetClauseParser Returns: \"\"\" # noinspection PyProtectedMember", "return \"\" else: return \"\" def skip_white_space(self): try: while self.get_next_token_type()", "node of resulting abstract syntax tree \"\"\" @classmethod def is_possible_start(cls,", ")._parse_with_pos() return token @classmethod def _is_possible_start(cls, token): return any( [", "@classmethod def _sub_factories(cls): return [ UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser, UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser, ]", "def _sub_factories(cls): return [ UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser, UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser, ] @classmethod", "the parser for the query part that creates the nested", "in themselves (recursive). Take for example UpdateExpression's grammar: UpdateExpression =>", "up. This way left-deep-descending traversal will process nodes in order.", "an operation. Take for example UpdateExpressionValue's grammar: Value => Operand*", "the AttributeValue nodes \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos()", "encounter a single {nc} in {nepc}.\".format( nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__, ) )", "it of structure NestableBinExpression => TargetClause* NestableBinExpression => TargetClause* BinOp", "self.get_last_token_type() == Token.WHITESPACE: return self.token_list[self.token_pos - 2].value else: return \"\"", "nodes \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() value,", "called when expecting a SetAction. 
So we should be aggressive", "class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" Parser to create update expressions \"\"\"", "AST which is root node of resulting abstract syntax tree", "return UpdateExpressionValue(children=[self.target_nodes.popleft()]) else: target_node = UpdateExpressionValue( children=[ self.target_nodes.popleft(), self.target_nodes.popleft(), self.target_nodes.popleft(),", "token_value else: self.raise_unexpected_token() def goto_next_significant_token(self): \"\"\"Continue past current token and", "child Value. \"\"\" @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def", "def _binop_factory_class(self): return UpdateExpressionValueOperatorParser class UpdateExpressionGroupedValueParser(ExpressionParser): \"\"\" A grouped value", "indicating whether we are at start of the parsing\"\"\" return", "_is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and token.value.upper() == \"ADD\"", "\"\"\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionRemoveActionsParser(", "2: target_node = UpdateExpressionValue( children=[ target_node, self.target_nodes.popleft(), self.target_nodes.popleft(), ] )", "self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else: self.raise_unexpected_token() self.goto_next_significant_token() def is_next_token_start_of_selector(self): return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET", "of an UpdateExpressionValue: For example value => a + :val", "the token to check Returns: bool: True if token is", "the corresponding tokens where in the originating expression. 
\"\"\" def", "self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionDeleteActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection", "a patch chain - SELECTORs: E.g.: [1] These are used", "and numbers but the number cannot be split up with", "operation. Returns: class: A class extending ExpressionParser \"\"\" def _create_node(self):", "the arguments of the initializer. This is implemented by the", "surrounded by round brackets. Each Operand can be a grouped", "_is_possible_start(cls, token): return any( [ UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token), ] ) class", "from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer class NestableExpressionParserMixin(object): \"\"\" For nodes", "or return empty string\"\"\" if self.token_pos > 0: return self.token_list[self.token_pos", "token that was correctly parsed if last one was whitespace", "can be nested in themselves (recursive) but with an operation.", "operation_value = self.get_next_token_value() assert operation_value in self.OPERATION_TOKENS self.goto_next_significant_token() return ExpressionValueOperator(operation_value)", "@classmethod def _is_possible_start(cls, token): \"\"\"REMOVE is not a keyword\"\"\" return", "grammar: Value => Operand* Value => Operand* + Value Value", "UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions,", "class\"\"\" def __init__(self, expression_token_list, token_pos=0): \"\"\" Args: expression_token_list: token_pos(int): Location", ") ) self.token_pos = token_pos @abstractmethod def _initializer_args(self): \"\"\" Get", "example of an UpdateExpressionValue: For example value => a +", "None def get_next_token(self): \"\"\" 
Get the next token to be", "target_node @six.add_metaclass(abc.ABCMeta) class ExpressionParser: \"\"\"Abstract class\"\"\" def __init__(self, expression_token_list, token_pos=0):", "Expression as produced by the factory. \"\"\" assert len(self.target_clauses) >", "return UpdateExpressionDeleteClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE", "be processed. So do the following actions: - skip opening", "problematic_token_in_near = \"\" else: problematic_token_in_near = problematic_token = self.get_next_token_value() near", "for i, func_elem_factory in enumerate(function_arguments): func_elem, self.token_pos = func_elem_factory( **self._initializer_args()", "nodes in order. Continuing the example of an UpdateExpression: For", "if 1st one was whitespace or return empty string\"\"\" if", "pattern. This approach is taken since it allows to remain", "or None if non existent.\"\"\" try: return self.token_list[self.token_pos + 1].type", "current token and skip all whitespaces\"\"\" self.token_pos += 1 self.skip_white_space()", "expecting an AddAction. So we should be aggressive on raising", "e.g. 
UpdateExpressionSetClauseParser Returns: \"\"\" logging.debug( \"Move token pos {pos} to", "return \"\" def get_following_token_value(self): \"\"\"Get the token value after the", "is not None and cls._is_possible_start(token) @classmethod @abstractmethod def _is_possible_start(cls, token):", "and cls._is_possible_start(token) @classmethod @abstractmethod def _is_possible_start(cls, token): \"\"\" Args: token(moto.dynamodb2.tokens.Token):", "unexpected token Args: token_type: A token type Returns: str: The", "self.target_nodes.popleft(), self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) while len(self.target_nodes) >= 2: target_node", "== Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else: self.raise_unexpected_token() self.goto_next_significant_token() def is_next_token_start_of_selector(self): return self.get_next_token_type()", "2) skip whitespace if there are any 3) Process equal-sign", "are any Returns: [path, value]: A list containing the Path", "stores but it does store more strict restrictions on how", "except IndexError: assert self.token_pos > 0, \"We should always have", "class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self):", "can be nested in themselves (recursive). Take for example UpdateExpression's", "SELECTORs: E.g.: [1] These are used to select an element", "UpdateExpressionRemoveActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionRemoveClause(children=[ast]) @classmethod def", "is stored which has no special characters - ATTRIBUTE_NAME: A", "like a list. 
Whitespaces can be between all these elements", "_parse(self): self._parse_target_clause(self._operand_factory_class()) while self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class()) if self._operand_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token()", "up at the end of the expression. \"\"\" while True:", "a nested structure. When a DOT is in a path", "Path node and the AttributeValue nodes \"\"\" path, self.token_pos =", "self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif self.get_next_token_type() == Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else: self.raise_unexpected_token() self.goto_next_significant_token() def", "UpdateExpressionValue / | \\ UpdateExpressionValue BinOp Operand / | |", "if self.get_following_token_type() == Token.WHITESPACE: try: return self.token_list[self.token_pos + 2].value except", "can be a grouped value by itself. \"\"\" def _parse(self):", "@classmethod @abstractmethod def _is_possible_start(cls, token): \"\"\" Args: token(moto.dynamodb2.tokens.Token): Returns: bool:", "expected to end up at the end of the expression.", "@classmethod def _sub_factories(cls): return [ UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser, UpdateExpressionPathParser, UpdateExpressionGroupedValueParser, ]", "produced by the factory. \"\"\" assert len(self.target_clauses) > 0, \"No", "literal - skip optional spaces - pass closing bracket \"\"\"", "@classmethod def is_possible_start(cls, token): return token is not None and", "example. 
Returns: dict: A dictionary of the initializer arguments \"\"\"", "1].type else: return None def get_2nd_last_token_value_if_last_was_whitespace(self): \"\"\"Get the 2nd last", "None def is_at_end(self): \"\"\"Return boolean indicating whether we are at", "def raise_unexpected_token(self): if self.is_at_end(): problematic_token = \"<EOF>\" problematic_token_in_near = \"\"", "should always have positive indexes\" logging.debug(\"We are out of range", "token.type == Token.ATTRIBUTE_VALUE class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def _parse(self): if UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token()", "impose much restrictions on the data it stores but it", ") # noinspection PyProtectedMember ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast) logging.debug(", "token \"\"\" try: return self.get_next_token().value except AttributeError: return None def", "following: 1) Process path 2) skip whitespace if there are", ")._parse_with_pos() self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space() value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos()", "TargetClause* BinOp NestableBinExpression This pattern comes back multiple times. This", "/ | | Operand + :val | a self.target_nodes looks", "self.token_list[self.token_pos + 1].value except IndexError: return \"\" def get_following_token_type(self): \"\"\"Get", "return UpdateExpressionRemoveAction(children=[path]) class UpdateExpressionAddClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast,", "Operand can be a grouped value by itself. 
\"\"\" def", "def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionRemoveActionsParser( **self._initializer_args()", "the Node that will be created that would be nested.", "**self._initializer_args() )._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionGroupedValue(children=value) @classmethod def _is_possible_start(cls, token): return", "SET a=3 REMOVE b UpdateExpression / \\ SET a=3 UpdateExpression", ")._parse_with_pos() self.skip_white_space() return [path, value] class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls,", "func_elem_factory( **self._initializer_args() )._parse_with_pos() function_elements.append(func_elem) if i + 1 < len(function_arguments):", "value of the next token to be processed Returns: str:", "{cn}\".format( cn=self.__class__.__name__ ) target_node = self._nestable_class()(children=[self.target_clauses.pop()]) while len(self.target_clauses) > 0:", "\"\"\"Get the last token that was correctly parsed or return", "the tree bottom up. 
For simplicity docstring will use Operand", "ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause,", "the last token that was correctly parsed or return empty", "return ExpressionValueOperator(operation_value) class UpdateExpressionOperandParser(ExpressionParser): \"\"\" Grammar Operand* => AttributeValue Operand*", "of an AST representing the Expression as produced by the", "while self.get_next_token_type() == Token.WHITESPACE: self.token_pos += 1 except IndexError: assert", "be UpdateExpression Returns: class: The class of the Nodes that", "value, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return [path, value]", "PyProtectedMember ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast) logging.debug( \"Continue where previous", "self.path_nodes = [] @classmethod def _is_possible_start(cls, token): \"\"\" Args: token(Token):", "expression_token_list: token_pos(int): Location where parsing is \"\"\" self.token_list = expression_token_list", "which has no special characters - ATTRIBUTE_NAME: A placeholder that", "at the end of the expression. 
\"\"\" while True: self.skip_white_space()", "**self._initializer_args() )._parse_with_pos() else: token, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() return", "could be the start of an UpdateExpressionPath \"\"\" if token.type", "logging.debug( \"Move token pos {pos} to continue parsing with specific", "\"Didn't encounter a single {nc} in {nepc}.\".format( nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__, )", "A dictionary of the initializer arguments \"\"\" @classmethod @abstractmethod def", "self.skip_white_space() while self.is_next_token_start_of_selector(): self.process_selector() self.skip_white_space() def process_attribute_identifying_token(self): if self.get_next_token_type() ==", "self.get_next_token_type() == Token.COMMA: self.goto_next_significant_token() else: break if len(self.target_clauses) == 0:", "\"\"\" if token.type == Token.ATTRIBUTE: return token.value in cls.FUNCTIONS.keys() else:", "@classmethod def make(cls, expression_str): token_list = ExpressionTokenizer.make_list(expression_str) return cls(token_list).parse() class", "UpdateExpressionSetActions @classmethod @abstractmethod def _nested_expression_parser_class(cls): \"\"\"Returns the parser for the", "self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() else: token, self.token_pos = UpdateExpressionPathParser(", "else: break if len(self.target_clauses) == 0: logging.debug( \"Didn't encounter a", "def get_following_token_type(self): \"\"\"Get the token type after the one that", "Start parsing the token_list from token_pos for the factory type.", "of the Operands for the Binary operations/actions. 
Returns: class: \"\"\"", "UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" Parser to create update expressions \"\"\" @classmethod", "= self._nestable_class()( children=[self.target_clauses.pop(), target_node] ) return target_node @six.add_metaclass(abc.ABCMeta) class ExpressionParser:", "UpdateExpressionSetActions is inside the expression so it can be followed", "return self._create_node() class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls):", "UpdateExpression's grammar: UpdateExpression => UpdateExpressionClause* UpdateExpression => UpdateExpressionClause* UpdateExpression If", "REMOVE b UpdateExpression / \\ SET a=3 UpdateExpression | REMOVE", "originating expression. \"\"\" def __init__(self, *args, **kwargs): self.target_clauses = deque()", "\"\"\" Get the type of the next token to be", "token is supposed to be a function Args: token(Token): the", "if self._operand_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token() return self._create_node() @abstractmethod def _operand_factory_class(self):", "containing the Path node and the AttributeValue nodes \"\"\" path,", "\"\"\"Abstract class\"\"\" def __init__(self, expression_token_list, token_pos=0): \"\"\" Args: expression_token_list: token_pos(int):", "@classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE_VALUE class UpdateExpressionAttributeValueOrPathParser(ExpressionParser):", "So do the following actions: - skip opening bracket -", "example SET a=3 REMOVE b UpdateExpression / \\ SET a=3", "class UpdateExpressionPathValueParser(ExpressionParser): def _parse_path_and_value(self): \"\"\" UpdateExpressionAddActionParser only gets called when", "Get the type of the next token to be processed", "token is of type `token_type` if not raise 
unexpected token", "import abc import six from collections import deque from moto.dynamodb2.parsing.ast_nodes", "def _is_possible_start(cls, token): \"\"\" Check whether a token is supposed", "start for entries processed by `cls` \"\"\" def _parse_with_pos(self): \"\"\"", "try: return self.get_next_token().type except AttributeError: return None def get_next_token(self): \"\"\"", "__init__(self, *args, **kwargs): super(UpdateExpressionPathParser, self).__init__(*args, **kwargs) self.path_nodes = [] @classmethod", "_parse(self): function_name = self.get_next_token_value() if function_name not in self.FUNCTIONS.keys(): #", "while self._nested_expression_parser_class().is_possible_start( self.get_next_token() ): self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space() if self.get_next_token_type() == Token.COMMA:", "if self.token_pos > 0: return self.token_list[self.token_pos - 1].type else: return", "reached\") def process_token_of_type(self, token_type): \"\"\" Maker sure the next token", "Returns: moto.dynamodb2.ast_nodes.Node: AST which is root node of resulting abstract", "UpdateExpressionSetClauseParser(ExpressionParser): \"\"\" UpdateExpressionSetClause => SET SetActions \"\"\" @classmethod def _is_possible_start(cls,", "sensitive raise InvalidUpdateExpression(function_name) self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements = [function_name] function_arguments =", "attribute_names must be separated with DOT's. Returns: UpdateExpressionPath: \"\"\" self.parse_path_chain()", "end is reached\") def process_token_of_type(self, token_type): \"\"\" Maker sure the", "on how they are represented in UpdateExpression's. 
\"\"\" def __init__(self,", "Operand / | | | | UpdateExpressionValue BinOp Operand -", "since it allows to remain the ordering of the Nodes", "try: return self.token_list[self.token_pos + 1].value except IndexError: return \"\" def", "abc import abstractmethod import abc import six from collections import", "UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() return UpdateExpressionSetAction(children=[path, value]) class UpdateExpressionPathParser(ExpressionParser): \"\"\" Paths", "self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value)) class UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod def _is_possible_start(cls, token): return", "None def get_next_token_value(self): \"\"\" Get the value of the next", "return UpdateExpressionRemoveClause(children=[ast]) @classmethod def _is_possible_start(cls, token): \"\"\"REMOVE is not a", "_initializer_args(self): \"\"\" Get the arguments of the initializer. This is", "1st one was whitespace or return empty string\"\"\" if self.get_following_token_type()", "in order of encountering. Go through them forward and build", "by others. Process SetActions one by one until no more", "The factory for the target clause e.g. UpdateExpressionSetClauseParser Returns: \"\"\"", "Operand + :val | a self.target_nodes looks like: ( a", "SetAction. 
\"\"\" self.skip_white_space() while self._nested_expression_parser_class().is_possible_start( self.get_next_token() ): self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space() if", "token is not None and cls._is_possible_start(token) @classmethod @abstractmethod def _is_possible_start(cls,", "parser in cls._sub_factories()) def _parse(self): for factory in self._sub_factories(): if", "return self.token_pos == 0 def get_last_token_value(self): \"\"\"Get the last token", "\"\" def get_following_token_type(self): \"\"\"Get the token type after the one", "function_name = self.get_next_token_value() if function_name not in self.FUNCTIONS.keys(): # Function", "UpdateExpressionRemoveClauseParser, ] @classmethod def _is_possible_start(cls, token): pass def __init__(self, *args,", "problematic_token = \"<EOF>\" problematic_token_in_near = \"\" else: problematic_token_in_near = problematic_token", "def process_dot(self): self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token() def parse_path_chain(self): self.process_attribute_identifying_token() self.skip_white_space() while self.is_next_token_start_of_selector():", "def _parse_with_pos(self): \"\"\" Start parsing the token_list from token_pos for", "Token, ExpressionTokenizer class NestableExpressionParserMixin(object): \"\"\" For nodes that can be", "For the example in the docstring this would be UpdateExpression", "any 3) Process a value 4) skip whitespace if there", "self.skip_white_space() def raise_unexpected_token(self): if self.is_at_end(): problematic_token = \"<EOF>\" problematic_token_in_near =", "but with an operation. Take for example UpdateExpressionValue's grammar: Value", "back multiple times. 
This Mixin adds re-usability for that type", "\"if_not_exists\": [ UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser, ], \"list_append\": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], } @classmethod", "no more SetAction. \"\"\" self.skip_white_space() while self._nested_expression_parser_class().is_possible_start( self.get_next_token() ): self._parse_target_clause(self._nested_expression_parser_class())", "\"\"\" UpdateExpressionSetActions \"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs)", "is \"\"\" self.token_list = expression_token_list self.token_pos = token_pos def _initializer_args(self):", "def _binop_factory_class(self): \"\"\" Get a factory that gets the possible", "_parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionAddActionsParser( **self._initializer_args() )._parse_with_pos()", "import deque from moto.dynamodb2.parsing.ast_nodes import ( UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction,", "def _is_possible_start(cls, token): \"\"\"REMOVE is not a keyword\"\"\" return token.type", "else: problematic_token_in_near = problematic_token = self.get_next_token_value() near = \"\".join( [", "A class extending ExpressionParser \"\"\" def _create_node(self): \"\"\" target_clauses has", "token): return token.type == Token.ATTRIBUTE_VALUE class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def _parse(self): if", "\"\"\" Maker sure the next token is of type `token_type`", "string if non existent.\"\"\" try: return self.token_list[self.token_pos + 1].value except", "SET a=3 >> REMOVE b ) Returns: moto.dynamodb2.ast_nodes.Node: Node of", "whitespace or return empty string\"\"\" if self.token_pos > 1 and", "if there are any Returns: [path, value]: A list containing", "them forward and build the tree bottom up. 
For simplicity", "\"\" def get_following_token_value(self): \"\"\"Get the token value after the one", "SELECTORs it is also allowed to have whitespaces between brackets", "represented in UpdateExpression's. \"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionPathParser, self).__init__(*args,", "of the initializer arguments \"\"\" @classmethod @abstractmethod def _nestable_class(cls): \"\"\"", "does store more strict restrictions on how they are represented", "ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction,", "self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionAddActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection", "process a function of an Update Expression \"\"\" # Map", "the Parser class of the Operands for the Binary operations/actions.", "UpdateExpressionDeleteActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionDeleteClause(children=[ast]) @classmethod def", "\"token_pos\": self.token_pos} @abstractmethod def _parse(self): \"\"\" Start parsing the token_list", "children=[ self.target_nodes.popleft(), self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) while len(self.target_nodes) >= 2:", "non existent.\"\"\" try: return self.token_list[self.token_pos + 1].value except IndexError: return", "that is not allowed in an UpdateExpression) - DOT's: These", "in self._sub_factories(): if sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory) return True return False def", "token.value.upper() == \"REMOVE\" class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def", "*args, 
**kwargs): self.target_clauses = deque() def _parse_target_clause(self, factory_class): \"\"\" Args:", "_is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionRemoveActionParser only gets", "token Args: token_type: A token type Returns: str: The value", "representing the Expression as produced by the factory. \"\"\" assert", "\"\"\" Check whether a token is supposed to be a", "- skip optional spaces - pass closing bracket \"\"\" self.process_token_of_type(Token.OPEN_SQUARE_BRACKET)", "token(Token): the token to check Returns: bool: True if token", "**self._initializer_args() )._parse_with_pos() return token @classmethod def _is_possible_start(cls, token): return any(", "the token is of type `token_type` \"\"\" if self.get_next_token_type() ==", "UpdateExpressionRemoveActionParser @classmethod def _nestable_class(cls): return UpdateExpressionRemoveActions class UpdateExpressionRemoveActionParser(ExpressionParser): \"\"\" RemoveAction", "be split up with spaces Attributes and attribute_names must be", "UpdateExpressionDeleteActions class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def", "\"\" def skip_white_space(self): try: while self.get_next_token_type() == Token.WHITESPACE: self.token_pos +=", "more next token \"\"\" try: return self.token_list[self.token_pos] except IndexError: return", "True: self.skip_white_space() if self.is_at_end(): logging.debug(\"End reached\") break elif self._parse_by_a_subfactory(): continue", "the Nodes as how the corresponding tokens where in the", "like: ( SET a=3 >> REMOVE b ) Returns: moto.dynamodb2.ast_nodes.Node:", "factory_class( **self._initializer_args() )._parse_with_pos() self.target_nodes.append(ast) logging.debug( \"Continue where previous parsing ended", "b UpdateExpression / \\ SET a=3 UpdateExpression | REMOVE 
b", "Node rather than the specific node This way left-deep-descending traversal", "\"\"\" UpdateExpressionSetActionParser only gets called when expecting a SetAction. So", "value after the one that is being parsed or empty", "non existent.\"\"\" try: return self.token_list[self.token_pos + 1].type except IndexError: return", "UpdateExpressionAddActionParser only gets called when expecting an AddAction. So we", "= ExpressionTokenizer.make_list(expression_str) return cls(token_list).parse() class UpdateExpressionSetClauseParser(ExpressionParser): \"\"\" UpdateExpressionSetClause => SET", "the query part that creates the nested nodes\"\"\" def _parse(self):", "process_selector(self): \"\"\" Process the selector is only called when a", "assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionAddActionsParser( **self._initializer_args() )._parse_with_pos() #", "False def _parse(self): \"\"\" Update Expression is the top-most node", "the top-most node therefore it is expected to end up", "moto.dynamodb2.tokens.Token: or None if no more next token \"\"\" try:", "looks like: ( SET a=3 >> REMOVE b ) Returns:", "in the tokenlist. \"\"\" return self._parse(), self.token_pos def parse(self): return", "@classmethod def _nestable_class(cls): return UpdateExpressionSetActions class UpdateExpressionSetActionParser(ExpressionParser): \"\"\" SetAction =>", "super(UpdateExpressionParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _nestable_class(cls): return UpdateExpression def", "self._create_node() @abstractmethod def _operand_factory_class(self): \"\"\" Get the Parser class of", "in a nested structure. 
When a DOT is in a", "any( [ UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token), ] ) class UpdateExpressionFunctionParser(ExpressionParser): \"\"\" A", "it does store more strict restrictions on how they are", "for sub_factory in self._sub_factories(): if sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory) return True return", "class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionAddActionParser", "the nodes in order of encountering. Go through them backwards", "Process the selector is only called when a selector must", "BinOp NestableBinExpression This pattern comes back multiple times. This Mixin", "parsing the token_list from token_pos for the factory type and", "Each Operand can be a grouped value by itself. \"\"\"", "Args: token_type: A token type Returns: str: The value if", "start of an UpdateExpressionPath \"\"\" if token.type == Token.ATTRIBUTE_NAME: return", "Path = Value So we create an UpdateExpressionSetAction Node that", "UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionAddActionParser @classmethod def _nestable_class(cls):", "This way left-deep-descending traversal will process nodes in order. Continuing", "\"\"\" @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and", "they are represented in UpdateExpression's. \"\"\" def __init__(self, *args, **kwargs):", "b ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST representing the", "processed Returns: moto.dynamodb2.tokens.Token: or None if no more next token", "This pattern comes back multiple times. 
This Mixin adds re-usability", "UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser, ] @classmethod def _is_possible_start(cls, token): pass def __init__(self,", "consider it of structure NestableExpression => TargetClause* NestableExpression => TargetClause*", "UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def _parse(self): if UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token() ): token, self.token_pos =", "be nested in themselves (recursive) but with an operation. Take", "have to make sure remove is not passed\"\"\" return True", "self.raise_unexpected_token() return self._create_node() @classmethod def make(cls, expression_str): token_list = ExpressionTokenizer.make_list(expression_str)", "of resulting abstract syntax tree \"\"\" @classmethod def is_possible_start(cls, token):", "parsing\"\"\" return self.token_pos == 0 def get_last_token_value(self): \"\"\"Get the last", "when expecting an AddAction. So we should be aggressive on", "operation. 
Take for example UpdateExpressionValue's grammar: Value => Operand* Value", "target_node = self._nestable_class()( children=[self.target_clauses.pop(), target_node] ) return target_node @six.add_metaclass(abc.ABCMeta) class", "# Map function to the factories for its elements FUNCTIONS", "logging.debug(\"End reached\") break elif self._parse_by_a_subfactory(): continue else: self.raise_unexpected_token() return self._create_node()", "UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, ) from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression from", "0: target_node = self._nestable_class()( children=[self.target_clauses.pop(), target_node] ) return target_node @six.add_metaclass(abc.ABCMeta)", "noinspection PyProtectedMember return UpdateExpressionSetClause(children=[ast]) class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" UpdateExpressionSetActions \"\"\"", "return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionRemoveActionParser only gets called when", "structure NestableBinExpression => TargetClause* NestableBinExpression => TargetClause* BinOp NestableBinExpression This", "node, self.token_pos = factory( **self._initializer_args() )._parse_with_pos() return node self.raise_unexpected_token() class", "1 self.skip_white_space() def raise_unexpected_token(self): if self.is_at_end(): problematic_token = \"<EOF>\" problematic_token_in_near", "six from collections import deque from moto.dynamodb2.parsing.ast_nodes import ( UpdateExpression,", "Returns: \"\"\" logging.debug( \"Move token pos {pos} to continue parsing", "- :val2 / | | Operand + :val | a", "def process_selector(self): \"\"\" Process the selector is only called when", "cls.FUNCTIONS.keys() else: return False def _parse(self): function_name = self.get_next_token_value() if", "[Token.PLUS_SIGN, Token.MINUS_SIGN] @classmethod def _is_possible_start(cls, 
token): return token.type in cls.OPERATION_TOKENS", ") @classmethod @abstractmethod def _nestable_class(cls): return UpdateExpressionSetActions @classmethod @abstractmethod def", "return {\"expression_token_list\": self.token_list, \"token_pos\": self.token_pos} @abstractmethod def _parse(self): \"\"\" Start", "token.value.upper() == \"DELETE\" class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def", "=> UpdateExpressionClause* UpdateExpression => UpdateExpressionClause* UpdateExpression If we consider it", "it allows to remain the ordering of the Nodes as", "self.token_list[self.token_pos - 1].value else: return \"\" def get_last_token_type(self): \"\"\"Get the", "Expression as produced by the factory. \"\"\" if len(self.target_nodes) ==", "to be processed Returns: moto.dynamodb2.tokens.Token: or None if no more", "it can be followed by others. Process SetActions one by", "how it is stored which has no special characters -", "self.get_next_token_type() == token_type: token_value = self.get_next_token_value() self.goto_next_significant_token() return token_value else:", "return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET def process_selector(self): \"\"\" Process the selector", "@classmethod def _nested_expression_parser_class(cls): return UpdateExpressionRemoveActionParser @classmethod def _nestable_class(cls): return UpdateExpressionRemoveActions", "that has 2 children. Left child Path and right child", "that build a path. For SELECTORs it is also allowed", "binary operation. Returns: class: A class extending ExpressionParser \"\"\" def", "corresponding tokens where in the originating expression. 
\"\"\" def __init__(self,", "which is root node of resulting abstract syntax tree and", "try: while self.get_next_token_type() == Token.WHITESPACE: self.token_pos += 1 except IndexError:", "_parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionRemoveActionsParser( **self._initializer_args() )._parse_with_pos()", "[ UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token), ] ) class UpdateExpressionFunctionParser(ExpressionParser): \"\"\" A helper", "= ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE) ) return attr_value @classmethod def _is_possible_start(cls, token):", "None if non existent.\"\"\" try: return self.token_list[self.token_pos + 1].type except", "def _nestable_class(cls): return UpdateExpression def _parse_expression_clause(self, factory_class): return self._parse_target_clause(factory_class) def", "can be between all these elements that build a path.", "end of the expression. 
\"\"\" while True: self.skip_white_space() if self.is_at_end():", "than the specific node This way left-deep-descending traversal will process", "fc=factory_class.__class__.__name__ ) ) # noinspection PyProtectedMember ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos()", "check Returns: bool: True if token is the start of", "return token.type == Token.ATTRIBUTE and token.value.upper() == \"DELETE\" class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser):", "if not raise unexpected token Args: token_type: A token type", "process_path(self): self.parse_path() return UpdateExpressionPath(children=self.path_nodes) def parse_path(self): \"\"\" A path is", "return self._create_node() @abstractmethod def _operand_factory_class(self): \"\"\" Get the Parser class", "between brackets and numbers but the number cannot be split", "\"\"\" self.skip_white_space() while self._nested_expression_parser_class().is_possible_start( self.get_next_token() ): self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space() if self.get_next_token_type()", "**self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionSetClause(children=[ast]) class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin):", "be checked Returns: bool: Whether the token could be the", "def _is_possible_start(cls, token): return any(parser.is_possible_start(token) for parser in cls._sub_factories()) def", "one that is being parsed or empty string if non", "_nested_expression_parser_class(cls): return UpdateExpressionSetActionParser @classmethod def _nestable_class(cls): return UpdateExpressionSetActions class UpdateExpressionSetActionParser(ExpressionParser):", "return UpdateExpressionAddActionParser @classmethod def _nestable_class(cls): return UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta) class UpdateExpressionPathValueParser(ExpressionParser):", "def 
is_next_token_start_of_patch_chain(self): return self.get_next_token_type() == Token.DOT def process_dot(self): self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token()", "return token.type == Token.OPEN_ROUND_BRACKET class UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN]", "not a keyword\"\"\" return token.type == Token.ATTRIBUTE and token.value.upper() ==", "name of an attribute as how it is stored which", "token and skip all whitespaces\"\"\" self.token_pos += 1 self.skip_white_space() def", "self.get_next_token().value except AttributeError: return None def is_at_end(self): \"\"\"Return boolean indicating", "are any 3) Process a value 4) skip whitespace if", "RuntimeError( \"{class_name} cannot be identified by the next token.\".format( class_name=cls._nestable_class().__name__", "Process value \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space()", "\"\"\" if len(self.target_nodes) == 1: return UpdateExpressionValue(children=[self.target_nodes.popleft()]) else: target_node =", "def _parse(self): \"\"\" Update Expression is the top-most node therefore", "self.path_nodes.append(ExpressionSelector(selector_value)) class UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionOperandParser.is_possible_start(token) def", "def _parse_target_clause(self, factory_class): \"\"\" Args: factory_class: The factory for the", "approach is taken since it allows to remain the ordering", "that have a name that is not allowed in an", "UpdateExpressionPathParser.is_possible_start(token), ] ) class UpdateExpressionFunctionParser(ExpressionParser): \"\"\" A helper to process", "the tokenlist. 
\"\"\" return self._parse(), self.token_pos def parse(self): return self._parse()", "empty string\"\"\" if self.get_following_token_type() == Token.WHITESPACE: try: return self.token_list[self.token_pos +", "| \\ UpdateExpressionValue BinOp Operand / | | | |", "always have positive indexes\" logging.debug(\"We are out of range so", "return self.token_list[self.token_pos + 2].value except IndexError: return \"\" else: return", "\"\" else: problematic_token_in_near = problematic_token = self.get_next_token_value() near = \"\".join(", "list containing the Path node and the AttributeValue nodes \"\"\"", "parsed if 1st one was whitespace or return empty string\"\"\"", "originating expression. \"\"\" def __init__(self, *args, **kwargs): super(NestableBinExpressionParser, self).__init__(*args, **kwargs)", "PyProtectedMember return UpdateExpressionSetClause(children=[ast]) class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" UpdateExpressionSetActions \"\"\" def", "None if no more next token \"\"\" try: return self.get_next_token().value", "_nestable_class(cls): return UpdateExpressionSetActions class UpdateExpressionSetActionParser(ExpressionParser): \"\"\" SetAction => Path =", "UpdateExpressionValueOperatorParser class UpdateExpressionGroupedValueParser(ExpressionParser): \"\"\" A grouped value is an Update", "get_following_token_type(self): \"\"\"Get the token type after the one that is", "at start of the parsing\"\"\" return self.token_pos == 0 def", "self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionFunction(children=function_elements) class UpdateExpressionRemoveClauseParser(ExpressionParser): \"\"\" UpdateExpressionRemoveClause => REMOVE", "a grouped value by itself. \"\"\" def _parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value,", "bottom up. 
This way left-deep-descending traversal will process nodes in", "token): return token.type == Token.ATTRIBUTE and token.value.upper() == \"ADD\" class", "and token.value.upper() == \"REMOVE\" class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod", "that will be created. \"\"\" def _create_node(self): \"\"\" target_clauses has", "] ) class UpdateExpressionFunctionParser(ExpressionParser): \"\"\" A helper to process a", "\"\"\"Get the token value after the one that is being", "tree bottom up. This way left-deep-descending traversal will process nodes", "Returns: class: \"\"\" @abstractmethod def _binop_factory_class(self): \"\"\" Get a factory", "abstractmethod import abc import six from collections import deque from", "tokens where in the originating expression. \"\"\" def __init__(self, *args,", "self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionSetActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection", "] @classmethod def _is_possible_start(cls, token): pass def __init__(self, *args, **kwargs):", "self.token_pos > 0: return self.token_list[self.token_pos - 1].value else: return \"\"", "| | | UpdateExpressionValue BinOp Operand - :val2 / |", "is in a path expression it is never part of", "self._nestable_class()( children=[self.target_clauses.pop(), target_node] ) return target_node @six.add_metaclass(abc.ABCMeta) class ExpressionParser: \"\"\"Abstract", "return UpdateExpressionDeleteActions class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token)", ")._parse_with_pos() else: token, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() return token", "UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, 
UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, ) from moto.dynamodb2.exceptions", "return True return False def _parse(self): \"\"\" Update Expression is", "UpdateExpression => UpdateExpressionClause* UpdateExpression If we consider it of structure", "to be processed Returns: str: value or None if no", "of type `token_type` \"\"\" if self.get_next_token_type() == token_type: token_value =", "noinspection PyProtectedMember return UpdateExpressionDeleteClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type", "AttributeError: return None def get_next_token(self): \"\"\" Get the next token", "strict restrictions on how they are represented in UpdateExpression's. \"\"\"", "created. \"\"\" def _create_node(self): \"\"\" target_clauses has the nodes in", "means to descent into a MAP. We will call each", "return \"\" def get_last_token_type(self): \"\"\"Get the last token type that", "or return empty string\"\"\" if self.get_following_token_type() == Token.WHITESPACE: try: return", "self._nested_expression_parser_class().is_possible_start( self.get_next_token() ): self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space() if self.get_next_token_type() == Token.COMMA: self.goto_next_significant_token()", "def process_token_of_type(self, token_type): \"\"\" Maker sure the next token is", "function_elements = [function_name] function_arguments = self.FUNCTIONS[function_name] for i, func_elem_factory in", "**kwargs): super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _is_possible_start(cls, token): raise", "Value If we consider it of structure NestableBinExpression => TargetClause*", "import six from collections import deque from moto.dynamodb2.parsing.ast_nodes import (", "def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and 
token.value.upper() ==", "when expecting a RemoveAction. So we should be aggressive on", "that creates the nested nodes\"\"\" def _parse(self): \"\"\" UpdateExpressionSetActions is", "and right child Value. \"\"\" @classmethod def _is_possible_start(cls, token): return", "1: return UpdateExpressionValue(children=[self.target_nodes.popleft()]) else: target_node = UpdateExpressionValue( children=[ self.target_nodes.popleft(), self.target_nodes.popleft(),", "for the query part that creates the nested nodes\"\"\" def", "Operand - :val2 / | | Operand + :val |", "@abstractmethod def _is_possible_start(cls, token): \"\"\" Args: token(moto.dynamodb2.tokens.Token): Returns: bool: True", "we consider it of structure NestableExpression => TargetClause* NestableExpression =>", "token.type == Token.ATTRIBUTE and token.value.upper() != \"REMOVE\": \"\"\"We have to", "class UpdateExpressionRemoveActionParser(ExpressionParser): \"\"\" RemoveAction => Path = Value So we", "is being parsed or None if non existent.\"\"\" try: return", "token): \"\"\"REMOVE is not a keyword\"\"\" return token.type == Token.ATTRIBUTE", "is not passed\"\"\" return True return False def _parse(self): return", "Token.ATTRIBUTE and token.value.upper() == \"REMOVE\" class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\"", "class of the Operands for the Binary operations/actions. Returns: class:", "@classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return UpdateExpressionDeleteAction(children=self._parse_path_and_value())", "bottom up. For simplicity docstring will use Operand Node rather", "We will call each descend a patch chain - SELECTORs:", "nested in themselves (recursive). 
Take for example UpdateExpression's grammar: UpdateExpression", "> 0: target_node = self._nestable_class()( children=[self.target_clauses.pop(), target_node] ) return target_node", "(ast, token_pos): tuple of AST which is root node of", "else: self.raise_unexpected_token() def goto_next_significant_token(self): \"\"\"Continue past current token and skip", "type after the one that is being parsed or None", "Go through them forward and build the tree bottom up.", "Token.OPEN_SQUARE_BRACKET def process_selector(self): \"\"\" Process the selector is only called", "example of an UpdateExpression: For example SET a=3 REMOVE b", "FUNCTIONS = { \"if_not_exists\": [ UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser, ], \"list_append\": [UpdateExpressionOperandParser,", "is not a keyword\"\"\" return token.type == Token.ATTRIBUTE and token.value.upper()", "UpdateExpressionAddClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionAddActionsParser(", "self.token_list = expression_token_list self.token_pos = token_pos def _initializer_args(self): return {\"expression_token_list\":", "self.raise_unexpected_token() return self._create_node() class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def", "Node that has 2 children. 
Left child Path and right", "@abstractmethod def _initializer_args(self): \"\"\" Get the arguments of the initializer.", "self.get_next_token_value() near = \"\".join( [ self.get_2nd_last_token_value_if_last_was_whitespace(), self.get_last_token_value(), problematic_token_in_near, self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(),", "InvalidTokenException(problematic_token, near) class NestableBinExpressionParser(ExpressionParser): \"\"\" For nodes that can be", "# noinspection PyProtectedMember return UpdateExpressionSetClause(children=[ast]) class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" UpdateExpressionSetActions", ") target_node = self._nestable_class()(children=[self.target_clauses.pop()]) while len(self.target_clauses) > 0: target_node =", "called when expecting a RemoveAction. So we should be aggressive", "self.get_last_token_value(), problematic_token_in_near, self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(), ] ) raise InvalidTokenException(problematic_token, near) class", "return None\"\"\" if self.token_pos > 0: return self.token_list[self.token_pos - 1].type", "self._create_node() @classmethod def make(cls, expression_str): token_list = ExpressionTokenizer.make_list(expression_str) return cls(token_list).parse()", "\"\"\" return self._parse(), self.token_pos def parse(self): return self._parse() def get_next_token_type(self):", "nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__, ) ) self.raise_unexpected_token() return self._create_node() class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): \"\"\"", "parse(self): return self._parse() def get_next_token_type(self): \"\"\" Get the type of", "return None def get_next_token(self): \"\"\" Get the next token to", "near = \"\".join( [ self.get_2nd_last_token_value_if_last_was_whitespace(), 
self.get_last_token_value(), problematic_token_in_near, self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(), ]", "be a grouped value by itself. \"\"\" def _parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET)", "string\"\"\" if self.get_following_token_type() == Token.WHITESPACE: try: return self.token_list[self.token_pos + 2].value", "raise RuntimeError( \"{class_name} cannot be identified by the next token.\".format(", "For SELECTORs it is also allowed to have whitespaces between", "UpdateExpressionOperandParser(ExpressionParser): \"\"\" Grammar Operand* => AttributeValue Operand* => UpdateExpressionFunction Operand*", "def get_last_token_value(self): \"\"\"Get the last token that was correctly parsed", "of encountering. Go through them forward and build the tree", "all these elements that build a path. For SELECTORs it", "position in the tokenlist. \"\"\" return self._parse(), self.token_pos def parse(self):", "UpdateExpressionValue BinOp Operand / | | | | UpdateExpressionValue BinOp", "0 return target_node class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" Parser to create", "| | | | UpdateExpressionValue BinOp Operand - :val2 /", "allowed in an UpdateExpression) - DOT's: These are used to", "+ 1].value except IndexError: return \"\" def get_following_token_type(self): \"\"\"Get the", "UpdateExpressionSetClauseParser Returns: \"\"\" logging.debug( \"Move token pos {pos} to continue", "return self._parse(), self.token_pos def parse(self): return self._parse() def get_next_token_type(self): \"\"\"", "was correctly parsed if 1st one was whitespace or return", "_is_possible_start(cls, token): return token.type == Token.OPEN_ROUND_BRACKET class UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS =", "for {cn}\".format( cn=self.__class__.__name__ ) target_node = self._nestable_class()(children=[self.target_clauses.pop()]) while len(self.target_clauses) 
>", "target_node = UpdateExpressionValue( children=[ target_node, self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) assert", "empty string\"\"\" if self.token_pos > 1 and self.get_last_token_type() == Token.WHITESPACE:", "it is also allowed to have whitespaces between brackets and", "PyProtectedMember return UpdateExpressionDeleteClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type ==", "others. Process SetActions one by one until no more SetAction.", "an AST representing the Expression as produced by the factory.", "UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector,", "self.get_next_token_value() if function_name not in self.FUNCTIONS.keys(): # Function names are", "NestableExpressionParserMixin(object): \"\"\" For nodes that can be nested in themselves", "True if token is a possible start for entries processed", "used to decent in a nested structure. When a DOT", "that is surrounded by round brackets. Each Operand can be", "token): \"\"\" Check whether a token is supposed to be", "nodes\"\"\" def _parse(self): \"\"\" UpdateExpressionSetActions is inside the expression so", "the type of the next token to be processed Returns:", "value clause that is surrounded by round brackets. Each Operand", "any(parser.is_possible_start(token) for parser in cls._sub_factories()) def _parse(self): for factory in", "the resulting token_pos. 
Returns: (ast, token_pos): tuple of AST which", "\"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionSetActionParser @classmethod def", "def _nested_expression_parser_class(cls): return UpdateExpressionRemoveActionParser @classmethod def _nestable_class(cls): return UpdateExpressionRemoveActions class", ")._parse_with_pos() self.skip_white_space() value, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return", "self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionRemoveActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember", "\"\"\"Get the last token type that was correctly parsed or", "UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator,", "nested in themselves (recursive) but with an operation. Take for", "if there are any 3) Process equal-sign token 4) skip", "string\"\"\" if self.token_pos > 1 and self.get_last_token_type() == Token.WHITESPACE: return", "= Value So we create an UpdateExpressionSetAction Node that has", "datatypes like a list. 
Whitespaces can be between all these", "None def get_2nd_following_token_value_if_following_was_whitespace(self): \"\"\"Get the 2nd following token that was", "the 2nd last token that was correctly parsed if last", "RemoveAction => Path = Value So we create an UpdateExpressionSetAction", "\"\"\"Returns the parser for the query part that creates the", ":val - :val2 UpdateExpressionValue / | \\ UpdateExpressionValue BinOp Operand", "__init__(self, expression_token_list, token_pos=0): \"\"\" Args: expression_token_list: token_pos(int): Location where parsing", "UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token), ] ) class UpdateExpressionFunctionParser(ExpressionParser): \"\"\" A helper to", "1].type except IndexError: return None def get_2nd_following_token_value_if_following_was_whitespace(self): \"\"\"Get the 2nd", ">> + >> :val >> - >> :val2 ) Returns:", ") raise InvalidTokenException(problematic_token, near) class NestableBinExpressionParser(ExpressionParser): \"\"\" For nodes that", "bracket \"\"\" self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value = self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value)) class UpdateExpressionValueParser(NestableBinExpressionParser):", "return attr_value @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE_VALUE", "return UpdateExpressionDeleteActionParser @classmethod def _nestable_class(cls): return UpdateExpressionDeleteActions class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod", "call each descend a patch chain - SELECTORs: E.g.: [1]", "self._operand_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token() return self._create_node() @abstractmethod def _operand_factory_class(self): 
\"\"\"", "_is_possible_start(cls, token): \"\"\" Args: token(Token): the token to be checked", "\"\"\" # noinspection PyProtectedMember ast, self.token_pos = factory_class( **self._initializer_args() )._parse_with_pos()", "class: \"\"\" @abstractmethod def _binop_factory_class(self): \"\"\" Get a factory that", "\"\"\" self.parse_path_chain() while self.is_next_token_start_of_patch_chain(): self.process_dot() self.parse_path_chain() def is_next_token_start_of_patch_chain(self): return self.get_next_token_type()", "UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue,", "spaces Attributes and attribute_names must be separated with DOT's. Returns:", "part within an Item. DynamoDB does not impose much restrictions", "in enumerate(function_arguments): func_elem, self.token_pos = func_elem_factory( **self._initializer_args() )._parse_with_pos() function_elements.append(func_elem) if", "\"\"\"Continue past current token and skip all whitespaces\"\"\" self.token_pos +=", "token.type == Token.ATTRIBUTE and token.value.upper() == \"ADD\" class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): \"\"\"", "Path Operand* => GroupedValue \"\"\" @classmethod def _sub_factories(cls): return [", "UpdateExpressionClause* UpdateExpression If we consider it of structure NestableExpression =>", "def _is_possible_start(cls, token): return token.type == Token.OPEN_ROUND_BRACKET class UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS", "single {nc} in {nepc}.\".format( nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__, ) ) self.raise_unexpected_token() return", "grouped value by itself. 
\"\"\" def _parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value, self.token_pos", "in cls.FUNCTIONS.keys() else: return False def _parse(self): function_name = self.get_next_token_value()", "parsed or return None\"\"\" if self.token_pos > 0: return self.token_list[self.token_pos", "super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _is_possible_start(cls, token): raise RuntimeError(", "spaces - pass closing bracket \"\"\" self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value = self.process_token_of_type(Token.NUMBER)", "nodes that can be nested in themselves (recursive). Take for", "Get a factory that gets the possible binary operation. Returns:", "def _parse_expression_clause(self, factory_class): return self._parse_target_clause(factory_class) def _parse_by_a_subfactory(self): for sub_factory in", "token.type in cls.OPERATION_TOKENS def _parse(self): operation_value = self.get_next_token_value() assert operation_value", "| REMOVE b self.target_clauses looks like: ( SET a=3 >>", "The value if the token is of type `token_type` \"\"\"", "\"\"\" @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\"", "if token is the start of a function. \"\"\" if", "class: The class of the Nodes that will be created.", "token type Returns: str: The value if the token is", "there are any \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos()", "noinspection PyProtectedMember return UpdateExpressionRemoveClause(children=[ast]) @classmethod def _is_possible_start(cls, token): \"\"\"REMOVE is", "def _nested_expression_parser_class(cls): return UpdateExpressionAddActionParser @classmethod def _nestable_class(cls): return UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta)", "AddAction. 
So we should be aggressive on raising invalid Tokens.", "the token_list from token_pos for the factory type and also", "def get_next_token_type(self): \"\"\" Get the type of the next token", "def _parse(self): \"\"\" UpdateExpressionSetActionParser only gets called when expecting a", "and token_pos is the position in the tokenlist. \"\"\" return", "a possible start for entries processed by `cls` \"\"\" def", "target_node, self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) assert len(self.target_nodes) == 0 return", "return UpdateExpressionAddClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE", "self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionGroupedValue(children=value) @classmethod def", "Update Expression value clause that is surrounded by round brackets.", "def _nestable_class(cls): \"\"\" Get the class of the Node that", "the next token is of type `token_type` if not raise", "_parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionSetActionsParser( **self._initializer_args() )._parse_with_pos()", "UpdateExpressionSetClauseParser Returns: \"\"\" # noinspection PyProtectedMember ast, self.token_pos = factory_class(", "self.token_pos = UpdateExpressionSetActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionSetClause(children=[ast])", "except IndexError: return \"\" else: return \"\" def skip_white_space(self): try:", "type `token_type` if not raise unexpected token Args: token_type: A", "UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionOperandParser.is_possible_start(token) def _operand_factory_class(self): return", "_binop_factory_class(self): \"\"\" 
Get a factory that gets the possible binary", "gets the possible binary operation. Returns: class: A class extending", "UpdateExpressionAddActionParser @classmethod def _nestable_class(cls): return UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta) class UpdateExpressionPathValueParser(ExpressionParser): def", "path. For SELECTORs it is also allowed to have whitespaces", "= token_pos @abstractmethod def _initializer_args(self): \"\"\" Get the arguments of", "# noinspection PyProtectedMember ast, self.token_pos = factory_class( **self._initializer_args() )._parse_with_pos() self.target_nodes.append(ast)", ")._parse_with_pos() self.target_nodes.append(ast) logging.debug( \"Continue where previous parsing ended {token_pos}\".format( token_pos=self.token_pos", "{pos} to continue parsing with specific factory class {fc}\".format( pos=self.token_pos,", "Paths are selectors within items to specify a part within", "_parse(self): if UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token() ): token, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args()", "@classmethod def _is_possible_start(cls, token): return any( [ UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token), ]", "value or None if no more next token \"\"\" try:", "nodes in order of encountering. 
Go through them backwards and", "def parse_path_chain(self): self.process_attribute_identifying_token() self.skip_white_space() while self.is_next_token_start_of_selector(): self.process_selector() self.skip_white_space() def process_attribute_identifying_token(self):", "skip whitespace if there are any \"\"\" path, self.token_pos =", "- :val2 UpdateExpressionValue / | \\ UpdateExpressionValue BinOp Operand /", "\"\"\" A helper to process a function of an Update", "\"\"\" self.token_list = expression_token_list self.token_pos = token_pos def _initializer_args(self): return", "**self._initializer_args() )._parse_with_pos() self.target_nodes.append(ast) logging.debug( \"Continue where previous parsing ended {token_pos}\".format(", "self.process_token_of_type(Token.ATTRIBUTE_VALUE) ) return attr_value @classmethod def _is_possible_start(cls, token): return token.type", "token is a possible start for entries processed by `cls`", "UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionGroupedValue(children=value) @classmethod def _is_possible_start(cls, token):", "these elements that build a path. 
For SELECTORs it is", "by the next token.\".format( class_name=cls._nestable_class().__name__ ) ) @classmethod @abstractmethod def", "Token.ATTRIBUTE and token.value.upper() != \"REMOVE\": \"\"\"We have to make sure", "UpdateExpression / \\ SET a=3 UpdateExpression | REMOVE b self.target_clauses", "cls(token_list).parse() class UpdateExpressionSetClauseParser(ExpressionParser): \"\"\" UpdateExpressionSetClause => SET SetActions \"\"\" @classmethod", "\"\"\" SetAction => Path = Value So we create an", "UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return [path, value] class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod", "boolean indicating whether we are at start of the parsing\"\"\"", "return UpdateExpressionFunction(children=function_elements) class UpdateExpressionRemoveClauseParser(ExpressionParser): \"\"\" UpdateExpressionRemoveClause => REMOVE RemoveActions \"\"\"", "a token is supposed to be a function Args: token(Token):", "an UpdateExpression: For example SET a=3 REMOVE b UpdateExpression /", "| UpdateExpressionValue BinOp Operand - :val2 / | | Operand", "token): raise RuntimeError( \"{class_name} cannot be identified by the next", "@six.add_metaclass(abc.ABCMeta) class ExpressionParser: \"\"\"Abstract class\"\"\" def __init__(self, expression_token_list, token_pos=0): \"\"\"", "def __init__(self, *args, **kwargs): super(NestableBinExpressionParser, self).__init__(*args, **kwargs) self.target_nodes = deque()", "created that would be nested. For the example in the", "order. 
Continuing the example of an UpdateExpression: For example SET", "UpdateExpressionRemoveClause => REMOVE RemoveActions \"\"\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token()", "self.get_2nd_following_token_value_if_following_was_whitespace(), ] ) raise InvalidTokenException(problematic_token, near) class NestableBinExpressionParser(ExpressionParser): \"\"\" For", "if no more next token \"\"\" try: return self.get_next_token().type except", "in cls._sub_factories()) def _parse(self): for factory in self._sub_factories(): if factory.is_possible_start(self.get_next_token()):", "token value after the one that is being parsed or", "_initializer_args(self): return {\"expression_token_list\": self.token_list, \"token_pos\": self.token_pos} @abstractmethod def _parse(self): \"\"\"", "self.skip_white_space() if self.is_at_end(): logging.debug(\"End reached\") break elif self._parse_by_a_subfactory(): continue else:", "\"\"\" Start parsing the token_list from token_pos for the factory", "class_name=cls._nestable_class().__name__ ) ) @classmethod @abstractmethod def _nestable_class(cls): return UpdateExpressionSetActions @classmethod", "class UpdateExpressionDeleteClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos =", "we should be aggressive on raising invalid Tokens. We can", "=> TargetClause* BinOp NestableBinExpression This pattern comes back multiple times.", "DOT's. 
Returns: UpdateExpressionPath: \"\"\" self.parse_path_chain() while self.is_next_token_start_of_patch_chain(): self.process_dot() self.parse_path_chain() def", "def is_next_token_start_of_selector(self): return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET def process_selector(self): \"\"\" Process", "= token_pos def _initializer_args(self): return {\"expression_token_list\": self.token_list, \"token_pos\": self.token_pos} @abstractmethod", "class of the Nodes that will be created. \"\"\" def", "_is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return UpdateExpressionAddAction(children=self._parse_path_and_value()) class UpdateExpressionDeleteClauseParser(ExpressionParser):", "token, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() return token @classmethod def", "one until no more SetAction. \"\"\" self.skip_white_space() while self._nested_expression_parser_class().is_possible_start( self.get_next_token()", "token.type == Token.OPEN_ROUND_BRACKET class UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN] @classmethod", "@six.add_metaclass(abc.ABCMeta) class UpdateExpressionPathValueParser(ExpressionParser): def _parse_path_and_value(self): \"\"\" UpdateExpressionAddActionParser only gets called", "path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() value, self.token_pos =", "token.value.upper() == \"SET\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos", "self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value)) class UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionOperandParser.is_possible_start(token)", 
"(recursive) but with an operation. Take for example UpdateExpressionValue's grammar:", "get_next_token_value(self): \"\"\" Get the value of the next token to", "func_elem, self.token_pos = func_elem_factory( **self._initializer_args() )._parse_with_pos() function_elements.append(func_elem) if i +", "+ >> :val >> - >> :val2 ) Returns: moto.dynamodb2.ast_nodes.Node:", "UpdateExpressionSetAction Node that has 2 children. Left child Path and", "self.get_next_token_value() self.goto_next_significant_token() return token_value else: self.raise_unexpected_token() def goto_next_significant_token(self): \"\"\"Continue past", "Returns: UpdateExpressionPath: \"\"\" self.parse_path_chain() while self.is_next_token_start_of_patch_chain(): self.process_dot() self.parse_path_chain() def is_next_token_start_of_patch_chain(self):", "InvalidUpdateExpression from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer class NestableExpressionParserMixin(object): \"\"\" For", "_parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return", "UpdateExpression def _parse_expression_clause(self, factory_class): return self._parse_target_clause(factory_class) def _parse_by_a_subfactory(self): for sub_factory", "never part of an attribute name but always means to", "These are used to select an element in ordered datatypes", "the factory. \"\"\" if len(self.target_nodes) == 1: return UpdateExpressionValue(children=[self.target_nodes.popleft()]) else:", "Returns: \"\"\" # noinspection PyProtectedMember ast, self.token_pos = factory_class( **self._initializer_args()", "nodes that can be nested in themselves (recursive) but with", "@classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and token.value.upper()", "as produced by the factory. 
\"\"\" assert len(self.target_clauses) > 0,", "and token.value.upper() == \"SET\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast,", "be identified by the next token.\".format( class_name=cls._nestable_class().__name__ ) ) @classmethod", "that type of pattern. This approach is taken since it", "empty string\"\"\" if self.token_pos > 0: return self.token_list[self.token_pos - 1].value", "token that was correctly parsed if 1st one was whitespace", "of AST which is root node of resulting abstract syntax", "passed\"\"\" return True return False def _parse(self): return self.process_path() def", "the next token to be processed Returns: str: value or", "return self.token_list[self.token_pos - 2].value else: return \"\" def get_following_token_value(self): \"\"\"Get", "it is stored which has no special characters - ATTRIBUTE_NAME:", "and build the tree bottom up. This way left-deep-descending traversal", ":val >> - >> :val2 ) Returns: moto.dynamodb2.ast_nodes.Node: Node of", "value by itself. 
\"\"\" def _parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value, self.token_pos =", "return [path, value] class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return", "a name that is not allowed in an UpdateExpression) -", "len(self.token_list) def is_at_start(self): \"\"\"Return boolean indicating whether we are at", "token): \"\"\" Args: token(Token): the token to be checked Returns:", "cls.OPERATION_TOKENS def _parse(self): operation_value = self.get_next_token_value() assert operation_value in self.OPERATION_TOKENS", "return node self.raise_unexpected_token() class UpdateExpressionAttributeValueParser(ExpressionParser): def _parse(self): attr_value = ExpressionAttributeValue(", "_operand_factory_class(self): \"\"\" Get the Parser class of the Operands for", "UpdateExpressionRemoveActionParser only gets called when expecting a RemoveAction. So we", "class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self):", "PyProtectedMember return UpdateExpressionAddClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type ==", "_parse(self): \"\"\" UpdateExpressionSetActionParser only gets called when expecting a SetAction.", "@classmethod @abstractmethod def _nestable_class(cls): return UpdateExpressionSetActions @classmethod @abstractmethod def _nested_expression_parser_class(cls):", "correctly parsed if 1st one was whitespace or return empty", "if token is a possible start for entries processed by", "the following: 1) Process path 2) skip whitespace if there", "will be created. 
\"\"\" def _create_node(self): \"\"\" target_clauses has the", "is inside the expression so it can be followed by", "@classmethod def _nestable_class(cls): return UpdateExpression def _parse_expression_clause(self, factory_class): return self._parse_target_clause(factory_class)", "if there are any 3) Process a value 4) skip", "self.token_pos = factory( **self._initializer_args() )._parse_with_pos() return node self.raise_unexpected_token() class UpdateExpressionAttributeValueParser(ExpressionParser):", "are at end of the parsing\"\"\" return self.token_pos == len(self.token_list)", "= UpdateExpressionAddActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionAddClause(children=[ast]) @classmethod", "order of encountering. Go through them backwards and build the", "the next token to be processed Returns: str: Token type", "def _nestable_class(cls): return UpdateExpressionSetActions @classmethod @abstractmethod def _nested_expression_parser_class(cls): \"\"\"Returns the", "gets called when expecting a SetAction. So we should be", "return None def get_2nd_last_token_value_if_last_was_whitespace(self): \"\"\"Get the 2nd last token that", "Continuing the example of an UpdateExpression: For example SET a=3", "return self._parse_target_clause(factory_class) def _parse_by_a_subfactory(self): for sub_factory in self._sub_factories(): if sub_factory.is_possible_start(self.get_next_token()):", "== Token.ATTRIBUTE_VALUE class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def _parse(self): if UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token() ):", "token is the start of a function. \"\"\" if token.type", "_is_possible_start(cls, token): return token.type in cls.OPERATION_TOKENS def _parse(self): operation_value =", "of the expression. \"\"\" while True: self.skip_white_space() if self.is_at_end(): logging.debug(\"End", "decent in a nested structure. 
When a DOT is in", "else: target_node = UpdateExpressionValue( children=[ self.target_nodes.popleft(), self.target_nodes.popleft(), self.target_nodes.popleft(), ] )", "\"\"\" For nodes that can be nested in themselves (recursive).", "where in the originating expression. \"\"\" def __init__(self, *args, **kwargs):", "for entries processed by `cls` \"\"\" def _parse_with_pos(self): \"\"\" Start", "IndexError: return \"\" def get_following_token_type(self): \"\"\"Get the token type after", "past current token and skip all whitespaces\"\"\" self.token_pos += 1", "def parse(self): return self._parse() def get_next_token_type(self): \"\"\" Get the type", "and build the tree bottom up. For simplicity docstring will", "== Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif self.get_next_token_type() == Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else: self.raise_unexpected_token()", "self.skip_white_space() self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionFunction(children=function_elements) class UpdateExpressionRemoveClauseParser(ExpressionParser): \"\"\" UpdateExpressionRemoveClause =>", "Returns: [path, value]: A list containing the Path node and", "or empty string if non existent.\"\"\" try: return self.token_list[self.token_pos +", "@classmethod def _is_possible_start(cls, token): \"\"\" Check whether a token is", "{token_pos}\".format( token_pos=token_pos ) ) self.token_pos = token_pos @abstractmethod def _initializer_args(self):", "a function of an Update Expression \"\"\" # Map function", "\"\"\" UpdateExpressionRemoveClause => REMOVE RemoveActions \"\"\" def _parse(self): assert self.is_possible_start(self.get_next_token())", "ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast) logging.debug( \"Continue where previous parsing", 
"UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() return token @classmethod def _is_possible_start(cls, token): return", "UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionRemoveActionParser only gets called when expecting", "@abstractmethod def _binop_factory_class(self): \"\"\" Get a factory that gets the", "path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return UpdateExpressionRemoveAction(children=[path]) class", "expression. \"\"\" def __init__(self, *args, **kwargs): super(NestableBinExpressionParser, self).__init__(*args, **kwargs) self.target_nodes", "Returns: moto.dynamodb2.tokens.Token: or None if no more next token \"\"\"", "Start parsing the token_list from token_pos for the factory type", "with specific factory class {fc}\".format( pos=self.token_pos, fc=factory_class.__class__.__name__ ) ) #", "if sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory) return True return False def _parse(self): \"\"\"", "\"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space()", "ExpressionParser \"\"\" def _create_node(self): \"\"\" target_clauses has the nodes in", "def _nestable_class(cls): return UpdateExpressionRemoveActions class UpdateExpressionRemoveActionParser(ExpressionParser): \"\"\" RemoveAction => Path", "None and cls._is_possible_start(token) @classmethod @abstractmethod def _is_possible_start(cls, token): \"\"\" Args:", "to end up at the end of the expression. \"\"\"", "class ExpressionParser: \"\"\"Abstract class\"\"\" def __init__(self, expression_token_list, token_pos=0): \"\"\" Args:", "or None if no more next token \"\"\" try: return", "Item. 
DynamoDB does not impose much restrictions on the data", "== 1: return UpdateExpressionValue(children=[self.target_nodes.popleft()]) else: target_node = UpdateExpressionValue( children=[ self.target_nodes.popleft(),", "token_type: A token type Returns: str: The value if the", "Location where parsing is \"\"\" self.token_list = expression_token_list self.token_pos =", "element in ordered datatypes like a list. Whitespaces can be", "=> Path Operand* => GroupedValue \"\"\" @classmethod def _sub_factories(cls): return", "self.token_list[self.token_pos - 1].type else: return None def get_2nd_last_token_value_if_last_was_whitespace(self): \"\"\"Get the", "operations/actions. Returns: class: \"\"\" @abstractmethod def _binop_factory_class(self): \"\"\" Get a", "is comprised of: - Attribute: the name of an attribute", "NestableExpressionParserMixin): \"\"\" UpdateExpressionSetActions \"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionActionsParser, self).__init__(*args,", "Expression \"\"\" # Map function to the factories for its", "that was correctly parsed if 1st one was whitespace or", "1].value else: return \"\" def get_last_token_type(self): \"\"\"Get the last token", "Value. \"\"\" @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self):", "token to check Returns: bool: True if token is the", "BinOp Operand - :val2 / | | Operand + :val", ":val2 ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST representing the", "== 0 return target_node class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" Parser to", "in the originating expression. 
\"\"\" def __init__(self, *args, **kwargs): self.target_clauses", "is not allowed in an UpdateExpression) - DOT's: These are", ")._parse_with_pos() return node self.raise_unexpected_token() class UpdateExpressionAttributeValueParser(ExpressionParser): def _parse(self): attr_value =", "should be aggressive on raising invalid Tokens. We can thus", "\"\"\" self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value = self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value)) class UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod", "token could be the start of an UpdateExpressionPath \"\"\" if", "whitespace if there are any 3) Process value \"\"\" path,", "any 3) Process equal-sign token 4) skip whitespace if there", "tuple of AST which is root node of resulting abstract", "self.token_pos = factory_class( **self._initializer_args() )._parse_with_pos() self.target_nodes.append(ast) logging.debug( \"Continue where previous", "be the start of an UpdateExpressionPath \"\"\" if token.type ==", "= { \"if_not_exists\": [ UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser, ], \"list_append\": [UpdateExpressionOperandParser, UpdateExpressionOperandParser],", "to remain the ordering of the Nodes as how the", "/ \\ SET a=3 UpdateExpression | REMOVE b self.target_clauses looks", "UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token() ): token, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() else:", "how the corresponding tokens where in the originating expression. 
\"\"\"", "UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, ) from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression from moto.dynamodb2.parsing.tokens", ")._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionAddClause(children=[ast]) @classmethod def _is_possible_start(cls, token):", "target_node class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): \"\"\" Parser to create update expressions", "self.token_pos = token_pos @abstractmethod def _initializer_args(self): \"\"\" Get the arguments", "target_node] ) return target_node @six.add_metaclass(abc.ABCMeta) class ExpressionParser: \"\"\"Abstract class\"\"\" def", "node This way left-deep-descending traversal will process nodes in order.", "3) Process a value 4) skip whitespace if there are", "else: return False def _parse(self): function_name = self.get_next_token_value() if function_name", "docstring will use Operand Node rather than the specific node", "implemented by the calling class. See ExpressionParser for an example.", "do the following: 1) Process path 2) skip whitespace if", "UpdateExpressionFunctionParser, UpdateExpressionPathParser, UpdateExpressionGroupedValueParser, ] @classmethod def _is_possible_start(cls, token): return any(parser.is_possible_start(token)", "OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN] @classmethod def _is_possible_start(cls, token): return token.type", ":val2 UpdateExpressionValue / | \\ UpdateExpressionValue BinOp Operand / |", "] ) while len(self.target_nodes) >= 2: target_node = UpdateExpressionValue( children=[", "create an UpdateExpressionSetAction Node that has 2 children. 
Left child", "UpdateExpression => UpdateExpressionClause* UpdateExpression => UpdateExpressionClause* UpdateExpression If we consider", "UpdateExpressionSetActions \"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self)", "if len(self.target_clauses) == 0: logging.debug( \"Didn't encounter a single {nc}", "UpdateExpressionSetAction(children=[path, value]) class UpdateExpressionPathParser(ExpressionParser): \"\"\" Paths are selectors within items", "that has no special characters except leading # to refer", "return self.get_next_token().value except AttributeError: return None def is_at_end(self): \"\"\"Return boolean", "process nodes in order. Continuing the example of an UpdateExpression:", "IndexError: return None def get_next_token_value(self): \"\"\" Get the value of", "return self.token_list[self.token_pos - 1].value else: return \"\" def get_last_token_type(self): \"\"\"Get", "UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName,", "a self.target_nodes looks like: ( a >> + >> :val", "== Token.ATTRIBUTE and token.value.upper() != \"REMOVE\": \"\"\"We have to make", "if token.type == Token.ATTRIBUTE: return token.value in cls.FUNCTIONS.keys() else: return", "\"\"\"Return boolean indicating whether we are at start of the", "to continue parsing with specific factory class {fc}\".format( pos=self.token_pos, fc=factory_class.__class__.__name__", "UpdateExpressionFunction(children=function_elements) class UpdateExpressionRemoveClauseParser(ExpressionParser): \"\"\" UpdateExpressionRemoveClause => REMOVE RemoveActions \"\"\" def", "_is_possible_start(cls, token): \"\"\"REMOVE is not a keyword\"\"\" return token.type ==", "for the factory type and also return the 
resulting token_pos.", "last token that was correctly parsed if last one was", "return cls(token_list).parse() class UpdateExpressionSetClauseParser(ExpressionParser): \"\"\" UpdateExpressionSetClause => SET SetActions \"\"\"", "__init__(self, *args, **kwargs): self.target_clauses = deque() def _parse_target_clause(self, factory_class): \"\"\"", "expression_token_list self.token_pos = token_pos def _initializer_args(self): return {\"expression_token_list\": self.token_list, \"token_pos\":", "not allowed in an UpdateExpression) - DOT's: These are used", "self.process_attribute_identifying_token() self.skip_white_space() while self.is_next_token_start_of_selector(): self.process_selector() self.skip_white_space() def process_attribute_identifying_token(self): if self.get_next_token_type()", "therefore it is expected to end up at the end", "UpdateExpression | REMOVE b self.target_clauses looks like: ( SET a=3", "<reponame>orenmazor/moto import logging from abc import abstractmethod import abc import", "\"\"\" @classmethod def _sub_factories(cls): return [ UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser, UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser,", "\"SET\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionSetActionsParser(", "one was whitespace or return empty string\"\"\" if self.token_pos >", "Returns: str: The value if the token is of type", "opening bracket - skip optional spaces - read numeric literal", "self.token_pos = UpdateExpressionRemoveActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionRemoveClause(children=[ast])", ")._parse_with_pos() function_elements.append(func_elem) if i + 1 < len(function_arguments): self.skip_white_space() self.process_token_of_type(Token.COMMA)", "return self.get_next_token().type except AttributeError: return None def 
get_next_token(self): \"\"\" Get", "parse_path(self): \"\"\" A path is comprised of: - Attribute: the", "def _parse(self): self._parse_target_clause(self._operand_factory_class()) while self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class()) if self._operand_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._operand_factory_class()) else:", "has the nodes in order of encountering. Go through them", "placeholder that has no special characters except leading # to", "+= 1 except IndexError: assert self.token_pos > 0, \"We should", "we are at start of the parsing\"\"\" return self.token_pos ==", "bool: True if token is a possible start for entries", "factory type and also return the resulting token_pos. Returns: (ast,", "being parsed or None if non existent.\"\"\" try: return self.token_list[self.token_pos", "return empty string\"\"\" if self.token_pos > 1 and self.get_last_token_type() ==", "@classmethod def _is_possible_start(cls, token): return token.type in cls.OPERATION_TOKENS def _parse(self):", "range so end is reached\") def process_token_of_type(self, token_type): \"\"\" Maker", "of the Nodes that will be created. \"\"\" def _create_node(self):", "in the docstring this would be UpdateExpression Returns: class: The", "start of the parsing\"\"\" return self.token_pos == 0 def get_last_token_value(self):", "return self.get_next_token_type() == Token.DOT def process_dot(self): self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token() def parse_path_chain(self):", "except AttributeError: return None def get_next_token(self): \"\"\" Get the next", "parsed or empty string if non existent.\"\"\" try: return self.token_list[self.token_pos", "expression. 
\"\"\" def __init__(self, *args, **kwargs): self.target_clauses = deque() def", "descend a patch chain - SELECTORs: E.g.: [1] These are", "[ UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser, UpdateExpressionPathParser, UpdateExpressionGroupedValueParser, ] @classmethod def _is_possible_start(cls, token):", "token.value in cls.FUNCTIONS.keys() else: return False def _parse(self): function_name =", "\"\" else: return \"\" def skip_white_space(self): try: while self.get_next_token_type() ==", "node therefore it is expected to end up at the", "the originating expression. \"\"\" def __init__(self, *args, **kwargs): super(NestableBinExpressionParser, self).__init__(*args,", "False def _parse(self): return self.process_path() def process_path(self): self.parse_path() return UpdateExpressionPath(children=self.path_nodes)", "**kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _is_possible_start(cls, token): raise RuntimeError( \"{class_name} cannot", "**self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionAddClause(children=[ast]) @classmethod def _is_possible_start(cls,", "make(cls, expression_str): token_list = ExpressionTokenizer.make_list(expression_str) return cls(token_list).parse() class UpdateExpressionSetClauseParser(ExpressionParser): \"\"\"", "\"Continue where previous parsing ended {token_pos}\".format( token_pos=self.token_pos ) ) def", "def is_at_start(self): \"\"\"Return boolean indicating whether we are at start", "token type after the one that is being parsed or", "\"\"\" def _parse_with_pos(self): \"\"\" Start parsing the token_list from token_pos", "the token to be checked Returns: bool: Whether the token", "of structure NestableExpression => TargetClause* NestableExpression => TargetClause* NestableExpression This", "up with spaces Attributes and attribute_names must be separated with", "themselves (recursive) but with an operation. 
Take for example UpdateExpressionValue's", "UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionSetActionParser @classmethod", "tokenlist. \"\"\" return self._parse(), self.token_pos def parse(self): return self._parse() def", "are at start of the parsing\"\"\" return self.token_pos == 0", "expression_token_list, token_pos=0): \"\"\" Args: expression_token_list: token_pos(int): Location where parsing is", "self.token_list, \"token_pos\": self.token_pos} @abstractmethod def _parse(self): \"\"\" Start parsing the", "UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return UpdateExpressionRemoveAction(children=[path]) class UpdateExpressionAddClauseParser(ExpressionParser): def _parse(self):", "get_following_token_value(self): \"\"\"Get the token value after the one that is", "whether a token is supposed to be a function Args:", "- DOT's: These are used to decent in a nested", "tree and token_pos is the position in the tokenlist. 
\"\"\"", "self.raise_unexpected_token() class UpdateExpressionAttributeValueParser(ExpressionParser): def _parse(self): attr_value = ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE) )", "that was correctly parsed or return None\"\"\" if self.token_pos >", "if the token is of type `token_type` \"\"\" if self.get_next_token_type()", "_is_possible_start(cls, token): pass def __init__(self, *args, **kwargs): super(UpdateExpressionParser, self).__init__(*args, **kwargs)", "return False def _parse(self): function_name = self.get_next_token_value() if function_name not", "class UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN] @classmethod def _is_possible_start(cls, token):", "and token.value.upper() == \"ADD\" class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod", "**kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _nestable_class(cls): return UpdateExpression def _parse_expression_clause(self, factory_class):", "token.value.upper() != \"REMOVE\": \"\"\"We have to make sure remove is", "cannot be identified by the next token.\".format( class_name=cls._nestable_class().__name__ ) )", "\"\"\" def __init__(self, *args, **kwargs): self.target_clauses = deque() def _parse_target_clause(self,", "self.token_pos += 1 self.skip_white_space() def raise_unexpected_token(self): if self.is_at_end(): problematic_token =", "- >> :val2 ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST", "children=[self.target_clauses.pop(), target_node] ) return target_node @six.add_metaclass(abc.ABCMeta) class ExpressionParser: \"\"\"Abstract class\"\"\"", "self._parse_target_clause(self._operand_factory_class()) while self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class()) if self._operand_factory_class().is_possible_start(self.get_next_token()): 
self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token() return", "ended {token_pos}\".format( token_pos=self.token_pos ) ) def _parse(self): self._parse_target_clause(self._operand_factory_class()) while self._binop_factory_class().is_possible_start(self.get_next_token()):", "Whether the token could be the start of an UpdateExpressionPath", "else: return \"\" def get_last_token_type(self): \"\"\"Get the last token type", "its elements FUNCTIONS = { \"if_not_exists\": [ UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser, ],", "the factories for its elements FUNCTIONS = { \"if_not_exists\": [", "Operand* + Value Value => Operand* - Value If we", "@classmethod def _nested_expression_parser_class(cls): return UpdateExpressionAddActionParser @classmethod def _nestable_class(cls): return UpdateExpressionAddActions", "\"No nodes for {cn}\".format( cn=self.__class__.__name__ ) target_node = self._nestable_class()(children=[self.target_clauses.pop()]) while", "where previous parsing ended {token_pos}\".format( token_pos=self.token_pos ) ) def _parse(self):", "does not impose much restrictions on the data it stores", "__init__(self, *args, **kwargs): super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _is_possible_start(cls,", "self.raise_unexpected_token() def goto_next_significant_token(self): \"\"\"Continue past current token and skip all", ") class UpdateExpressionFunctionParser(ExpressionParser): \"\"\" A helper to process a function", "have whitespaces between brackets and numbers but the number cannot", "next token to be processed Returns: str: value or None", "**self._initializer_args() )._parse_with_pos() self.skip_white_space() value, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space()", "can thus do the following: 1) Process path 2) skip", "else: token, 
self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() return token @classmethod", "process_attribute_identifying_token(self): if self.get_next_token_type() == Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif self.get_next_token_type() == Token.ATTRIBUTE_NAME:", "goto_next_significant_token(self): \"\"\"Continue past current token and skip all whitespaces\"\"\" self.token_pos", "any 3) Process value \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args()", "the factory type. Returns: moto.dynamodb2.ast_nodes.Node: AST which is root node", "the last token type that was correctly parsed or return", "_is_possible_start(cls, token): return UpdateExpressionOperandParser.is_possible_start(token) def _operand_factory_class(self): return UpdateExpressionOperandParser def _binop_factory_class(self):", "SET a=3 UpdateExpression | REMOVE b self.target_clauses looks like: (", "if non existent.\"\"\" try: return self.token_list[self.token_pos + 1].value except IndexError:", "\"\"\" Get a factory that gets the possible binary operation.", "else: return None def get_2nd_last_token_value_if_last_was_whitespace(self): \"\"\"Get the 2nd last token", "the possible binary operation. Returns: class: A class extending ExpressionParser", "allowed to have whitespaces between brackets and numbers but the", "token_pos def _initializer_args(self): return {\"expression_token_list\": self.token_list, \"token_pos\": self.token_pos} @abstractmethod def", "order. 
Continuing the example of an UpdateExpressionValue: For example value", "restrictions on the data it stores but it does store", "- Value If we consider it of structure NestableBinExpression =>", "*args, **kwargs): super(UpdateExpressionParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _nestable_class(cls): return", "Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else: self.raise_unexpected_token() self.goto_next_significant_token() def is_next_token_start_of_selector(self): return self.get_next_token_type() ==", "token_pos. Returns: (ast, token_pos): tuple of AST which is root", "- pass closing bracket \"\"\" self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value = self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET)", "return [ UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser, UpdateExpressionPathParser, UpdateExpressionGroupedValueParser, ] @classmethod def _is_possible_start(cls,", "\"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionRemoveActionParser @classmethod def", "ExpressionParser: \"\"\"Abstract class\"\"\" def __init__(self, expression_token_list, token_pos=0): \"\"\" Args: expression_token_list:", "\"\"\" UpdateExpressionAddActionParser only gets called when expecting an AddAction. So", "> 1 and self.get_last_token_type() == Token.WHITESPACE: return self.token_list[self.token_pos - 2].value", "followed by others. Process SetActions one by one until no", "token_pos is the position in the tokenlist. 
\"\"\" return self._parse(),", "Returns: dict: A dictionary of the initializer arguments \"\"\" @classmethod", "self.get_following_token_type() == Token.WHITESPACE: try: return self.token_list[self.token_pos + 2].value except IndexError:", "class UpdateExpressionOperandParser(ExpressionParser): \"\"\" Grammar Operand* => AttributeValue Operand* => UpdateExpressionFunction", "UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionDeleteActionParser @classmethod def _nestable_class(cls):", "example UpdateExpressionValue's grammar: Value => Operand* Value => Operand* +", "but the number cannot be split up with spaces Attributes", "_parse(self): attr_value = ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE) ) return attr_value @classmethod def", "self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) while len(self.target_nodes) >= 2: target_node =", "self.get_next_token_value() assert operation_value in self.OPERATION_TOKENS self.goto_next_significant_token() return ExpressionValueOperator(operation_value) class UpdateExpressionOperandParser(ExpressionParser):", "attr_value @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE_VALUE class", "== Token.DOT def process_dot(self): self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token() def parse_path_chain(self): self.process_attribute_identifying_token() self.skip_white_space()", "self.skip_white_space() value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() return UpdateExpressionSetAction(children=[path, value])", "Value So we create an UpdateExpressionSetAction Node that has 2", "assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionSetActionsParser( **self._initializer_args() )._parse_with_pos() #", "token.type == Token.ATTRIBUTE and token.value.upper() 
== \"DELETE\" class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): \"\"\"", "self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionGroupedValue(children=value) @classmethod def _is_possible_start(cls, token): return token.type ==", "existent.\"\"\" try: return self.token_list[self.token_pos + 1].type except IndexError: return None", "self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return UpdateExpressionRemoveAction(children=[path]) class UpdateExpressionAddClauseParser(ExpressionParser):", "_nestable_class(cls): return UpdateExpression def _parse_expression_clause(self, factory_class): return self._parse_target_clause(factory_class) def _parse_by_a_subfactory(self):", ")._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionSetClause(children=[ast]) class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): \"\"\"", "return True return False def _parse(self): return self.process_path() def process_path(self):", "clause that is surrounded by round brackets. Each Operand can", "RemoveAction. So we should be aggressive on raising invalid Tokens.", "UpdateExpressionPathParser, UpdateExpressionGroupedValueParser, ] @classmethod def _is_possible_start(cls, token): return any(parser.is_possible_start(token) for", "called when a selector must be processed. So do the", "if i + 1 < len(function_arguments): self.skip_white_space() self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return", "Returns: class: A class extending ExpressionParser \"\"\" def _create_node(self): \"\"\"", "factory for the target clause e.g. UpdateExpressionSetClauseParser Returns: \"\"\" #", "return self.token_list[self.token_pos + 1].value except IndexError: return \"\" def get_following_token_type(self):", "of a function. 
\"\"\" if token.type == Token.ATTRIBUTE: return token.value", "Args: token(Token): the token to be checked Returns: bool: Whether", "patch chain - SELECTORs: E.g.: [1] These are used to", "way left-deep-descending traversal will process nodes in order. Continuing the", "create update expressions \"\"\" @classmethod def _sub_factories(cls): return [ UpdateExpressionSetClauseParser,", "_nested_expression_parser_class(cls): \"\"\"Returns the parser for the query part that creates", "def __init__(self, *args, **kwargs): self.target_clauses = deque() def _parse_target_clause(self, factory_class):", "Token.ATTRIBUTE_VALUE class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def _parse(self): if UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token() ): token,", "at end of the parsing\"\"\" return self.token_pos == len(self.token_list) def", "nodes in order. Continuing the example of an UpdateExpressionValue: For", "elements FUNCTIONS = { \"if_not_exists\": [ UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser, ], \"list_append\":", "are any \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space()", "False def _parse(self): function_name = self.get_next_token_value() if function_name not in", "a DOT is in a path expression it is never", "to be processed Returns: str: Token type or None if", "a SetAction. So we should be aggressive on raising invalid", "gets called when expecting a RemoveAction. So we should be", "def get_next_token(self): \"\"\" Get the next token to be processed", "def _parse(self): if UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token() ): token, self.token_pos = UpdateExpressionAttributeValueParser(", "the nodes in order of encountering. 
Go through them forward", ") ) def _parse(self): self._parse_target_clause(self._operand_factory_class()) while self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class()) if self._operand_factory_class().is_possible_start(self.get_next_token()):", "except IndexError: return \"\" def get_following_token_type(self): \"\"\"Get the token type", "self).__init__(*args, **kwargs) self.path_nodes = [] @classmethod def _is_possible_start(cls, token): \"\"\"", "UpdateExpressionAddAction(children=self._parse_path_and_value()) class UpdateExpressionDeleteClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos", "=> TargetClause* NestableBinExpression => TargetClause* BinOp NestableBinExpression This pattern comes", "a MAP. We will call each descend a patch chain", "return token_value else: self.raise_unexpected_token() def goto_next_significant_token(self): \"\"\"Continue past current token", "a >> + >> :val >> - >> :val2 )", "in ordered datatypes like a list. Whitespaces can be between", "function_name not in self.FUNCTIONS.keys(): # Function names are case sensitive", "ExpressionParser for an example. 
Returns: dict: A dictionary of the", "- 1].type else: return None def get_2nd_last_token_value_if_last_was_whitespace(self): \"\"\"Get the 2nd", "if token.type == Token.ATTRIBUTE_NAME: return True elif token.type == Token.ATTRIBUTE", "processed by `cls` \"\"\" def _parse_with_pos(self): \"\"\" Start parsing the", "is being parsed or empty string if non existent.\"\"\" try:", "\"\"\" try: return self.get_next_token().type except AttributeError: return None def get_next_token(self):", "UpdateExpressionValue( children=[ self.target_nodes.popleft(), self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) while len(self.target_nodes) >=", "indexes\" logging.debug(\"We are out of range so end is reached\")", "_is_possible_start(cls, token): return any(parser.is_possible_start(token) for parser in cls._sub_factories()) def _parse(self):", "gets called when expecting an AddAction. So we should be", "is the top-most node therefore it is expected to end", "A placeholder that has no special characters except leading #", "== Token.ATTRIBUTE_NAME: return True elif token.type == Token.ATTRIBUTE and token.value.upper()", "UpdateExpression If we consider it of structure NestableExpression => TargetClause*", "# noinspection PyProtectedMember return UpdateExpressionDeleteClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return", "\"\"\" assert len(self.target_clauses) > 0, \"No nodes for {cn}\".format( cn=self.__class__.__name__", "Operands for the Binary operations/actions. 
Returns: class: \"\"\" @abstractmethod def", "2nd following token that was correctly parsed if 1st one", "factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast) logging.debug( \"Continue where previous parsing ended {token_pos}\".format( token_pos=token_pos", "get_2nd_last_token_value_if_last_was_whitespace(self): \"\"\"Get the 2nd last token that was correctly parsed", "`token_type` \"\"\" if self.get_next_token_type() == token_type: token_value = self.get_next_token_value() self.goto_next_significant_token()", "super(UpdateExpressionPathParser, self).__init__(*args, **kwargs) self.path_nodes = [] @classmethod def _is_possible_start(cls, token):", "positive indexes\" logging.debug(\"We are out of range so end is", ":val | a self.target_nodes looks like: ( a >> +", "( a >> + >> :val >> - >> :val2", "Operand* => UpdateExpressionFunction Operand* => Path Operand* => GroupedValue \"\"\"", "| | UpdateExpressionValue BinOp Operand - :val2 / | |", "self.skip_white_space() return [path, value] class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token):", "except AttributeError: return None def is_at_end(self): \"\"\"Return boolean indicating whether", "that can be nested in themselves (recursive). Take for example", "a=3 UpdateExpression | REMOVE b self.target_clauses looks like: ( SET", "self.target_nodes.append(ast) logging.debug( \"Continue where previous parsing ended {token_pos}\".format( token_pos=self.token_pos )", "encountering. Go through them backwards and build the tree bottom", "when a selector must be processed. 
So do the following", "`cls` \"\"\" def _parse_with_pos(self): \"\"\" Start parsing the token_list from", "nodes for {cn}\".format( cn=self.__class__.__name__ ) target_node = self._nestable_class()(children=[self.target_clauses.pop()]) while len(self.target_clauses)", "[UpdateExpressionOperandParser, UpdateExpressionOperandParser], } @classmethod def _is_possible_start(cls, token): \"\"\" Check whether", "try: return self.get_next_token().value except AttributeError: return None def is_at_end(self): \"\"\"Return", "# noinspection PyProtectedMember return UpdateExpressionAddClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return", "self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(), ] ) raise InvalidTokenException(problematic_token, near) class NestableBinExpressionParser(ExpressionParser): \"\"\"", "pass def __init__(self, *args, **kwargs): super(UpdateExpressionParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod", "the tree bottom up. 
This way left-deep-descending traversal will process", "value] class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def", "Token type or None if no more next token \"\"\"", "class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionDeleteActionParser", "in cls.OPERATION_TOKENS def _parse(self): operation_value = self.get_next_token_value() assert operation_value in", "def get_last_token_type(self): \"\"\"Get the last token type that was correctly", "target_node = self._nestable_class()(children=[self.target_clauses.pop()]) while len(self.target_clauses) > 0: target_node = self._nestable_class()(", "this would be UpdateExpression Returns: class: The class of the", "@classmethod def _nested_expression_parser_class(cls): return UpdateExpressionDeleteActionParser @classmethod def _nestable_class(cls): return UpdateExpressionDeleteActions", "=> REMOVE RemoveActions \"\"\" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast,", "token pos {pos} to continue parsing with specific factory class", "update expressions \"\"\" @classmethod def _sub_factories(cls): return [ UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser,", "\"\"\"Return boolean indicating whether we are at end of the", "TargetClause* NestableExpression => TargetClause* NestableExpression This pattern comes back multiple", ") self.token_pos = token_pos @abstractmethod def _initializer_args(self): \"\"\" Get the", "would be UpdateExpression Returns: class: The class of the Nodes", "skip opening bracket - skip optional spaces - read numeric", "round brackets. 
Each Operand can be a grouped value by", "root node of resulting abstract syntax tree and token_pos is", "keyword\"\"\" return token.type == Token.ATTRIBUTE and token.value.upper() == \"REMOVE\" class", "raise unexpected token Args: token_type: A token type Returns: str:", "supposed to be a function Args: token(Token): the token to", "def _is_possible_start(cls, token): pass def __init__(self, *args, **kwargs): super(UpdateExpressionParser, self).__init__(*args,", "**kwargs): super(NestableBinExpressionParser, self).__init__(*args, **kwargs) self.target_nodes = deque() def _parse_target_clause(self, factory_class):", "while True: self.skip_white_space() if self.is_at_end(): logging.debug(\"End reached\") break elif self._parse_by_a_subfactory():", "in {nepc}.\".format( nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__, ) ) self.raise_unexpected_token() return self._create_node() class", "\"\"\" Args: token(Token): the token to be checked Returns: bool:", "Nodes as how the corresponding tokens where in the originating", "def _operand_factory_class(self): \"\"\" Get the Parser class of the Operands", "are out of range so end is reached\") def process_token_of_type(self,", "assert len(self.target_nodes) == 0 return target_node class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): \"\"\"", "A path is comprised of: - Attribute: the name of", "self.token_pos > 1 and self.get_last_token_type() == Token.WHITESPACE: return self.token_list[self.token_pos -", "specific node This way left-deep-descending traversal will process nodes in", "self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionDeleteActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember", "i, func_elem_factory in enumerate(function_arguments): func_elem, self.token_pos = func_elem_factory( **self._initializer_args() )._parse_with_pos()", "\"DELETE\" class 
UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return", "> 0, \"We should always have positive indexes\" logging.debug(\"We are", "grouped value is an Update Expression value clause that is", "token that was correctly parsed or return empty string\"\"\" if", "whitespace if there are any 3) Process a value 4)", "self.skip_white_space() if self.get_next_token_type() == Token.COMMA: self.goto_next_significant_token() else: break if len(self.target_clauses)", "class extending ExpressionParser \"\"\" def _create_node(self): \"\"\" target_clauses has the", "part that creates the nested nodes\"\"\" def _parse(self): \"\"\" UpdateExpressionSetActions", "UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionRemoveActionParser @classmethod def _nestable_class(cls):", "factory. \"\"\" assert len(self.target_clauses) > 0, \"No nodes for {cn}\".format(", "factory_class): \"\"\" Args: factory_class: The factory for the target clause", "def process_attribute_identifying_token(self): if self.get_next_token_type() == Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif self.get_next_token_type() ==", "== Token.ATTRIBUTE and token.value.upper() == \"ADD\" class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions", "self.token_pos = UpdateExpressionAddActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionAddClause(children=[ast])", ">> :val2 ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST representing", "the ordering of the Nodes as how the corresponding tokens", "Get the Parser class of the Operands for the Binary", "_nestable_class(cls): return UpdateExpressionSetActions @classmethod @abstractmethod def _nested_expression_parser_class(cls): 
\"\"\"Returns the parser", "UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser, UpdateExpressionPathParser, UpdateExpressionGroupedValueParser, ] @classmethod def _is_possible_start(cls, token): return", "nested structure. When a DOT is in a path expression", "self.process_selector() self.skip_white_space() def process_attribute_identifying_token(self): if self.get_next_token_type() == Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif", "\"\"\" @abstractmethod def _binop_factory_class(self): \"\"\" Get a factory that gets", "\"\"\" def _parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos()", "so it can be followed by others. Process SetActions one", "type or None if no more next token \"\"\" try:", "_parse_path_and_value(self): \"\"\" UpdateExpressionAddActionParser only gets called when expecting an AddAction.", "self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionFunction(children=function_elements) class UpdateExpressionRemoveClauseParser(ExpressionParser): \"\"\" UpdateExpressionRemoveClause => REMOVE RemoveActions", "**self._initializer_args() )._parse_with_pos() return node self.raise_unexpected_token() class UpdateExpressionAttributeValueParser(ExpressionParser): def _parse(self): attr_value", "token.value.upper() == \"ADD\" class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def", "re-usability for that type of pattern. 
This approach is taken", "from collections import deque from moto.dynamodb2.parsing.ast_nodes import ( UpdateExpression, UpdateExpressionSetClause,", "syntax tree \"\"\" @classmethod def is_possible_start(cls, token): return token is", "[path, value] class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token)", "self.raise_unexpected_token() self.goto_next_significant_token() def is_next_token_start_of_selector(self): return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET def process_selector(self):", "_parse(self): \"\"\" Start parsing the token_list from token_pos for the", "path is comprised of: - Attribute: the name of an", "): self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space() if self.get_next_token_type() == Token.COMMA: self.goto_next_significant_token() else: break", "InvalidUpdateExpression(function_name) self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements = [function_name] function_arguments = self.FUNCTIONS[function_name] for", "of: - Attribute: the name of an attribute as how", "{nc} in {nepc}.\".format( nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__, ) ) self.raise_unexpected_token() return self._create_node()", "token_pos=0): \"\"\" Args: expression_token_list: token_pos(int): Location where parsing is \"\"\"", "in order. 
Continuing the example of an UpdateExpressionValue: For example", "token): return token is not None and cls._is_possible_start(token) @classmethod @abstractmethod", "return UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta) class UpdateExpressionPathValueParser(ExpressionParser): def _parse_path_and_value(self): \"\"\" UpdateExpressionAddActionParser only", "token_pos): tuple of AST which is root node of resulting", "\"\"\"Get the 2nd last token that was correctly parsed if", "def parse_path(self): \"\"\" A path is comprised of: - Attribute:", "UpdateExpressionGroupedValue(children=value) @classmethod def _is_possible_start(cls, token): return token.type == Token.OPEN_ROUND_BRACKET class", "the value of the next token to be processed Returns:", "grammar: UpdateExpression => UpdateExpressionClause* UpdateExpression => UpdateExpressionClause* UpdateExpression If we", "\\ SET a=3 UpdateExpression | REMOVE b self.target_clauses looks like:", "must be processed. So do the following actions: - skip", "get_next_token_type(self): \"\"\" Get the type of the next token to", "[ self.get_2nd_last_token_value_if_last_was_whitespace(), self.get_last_token_value(), problematic_token_in_near, self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(), ] ) raise InvalidTokenException(problematic_token,", "function to the factories for its elements FUNCTIONS = {", "self.skip_white_space() while self._nested_expression_parser_class().is_possible_start( self.get_next_token() ): self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space() if self.get_next_token_type() ==", "are used to decent in a nested structure. 
When a", "no more next token \"\"\" try: return self.get_next_token().type except AttributeError:", "to be checked Returns: bool: Whether the token could be", "each descend a patch chain - SELECTORs: E.g.: [1] These", "self.get_next_token_type() == Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif self.get_next_token_type() == Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else:", "self.is_at_end(): problematic_token = \"<EOF>\" problematic_token_in_near = \"\" else: problematic_token_in_near =", "the factory. \"\"\" assert len(self.target_clauses) > 0, \"No nodes for", "attributes that have a name that is not allowed in", "token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionSetActionParser only gets called", "Function names are case sensitive raise InvalidUpdateExpression(function_name) self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements", "of the parsing\"\"\" return self.token_pos == 0 def get_last_token_value(self): \"\"\"Get", "the originating expression. \"\"\" def __init__(self, *args, **kwargs): self.target_clauses =", "\"\"\" target_clauses has the nodes in order of encountering. 
Go", "self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionRemoveActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection", "Take for example UpdateExpression's grammar: UpdateExpression => UpdateExpressionClause* UpdateExpression =>", ">> REMOVE b ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST", "\"\"\" try: return self.get_next_token().value except AttributeError: return None def is_at_end(self):", "/ | | | | UpdateExpressionValue BinOp Operand - :val2", "= func_elem_factory( **self._initializer_args() )._parse_with_pos() function_elements.append(func_elem) if i + 1 <", "elements that build a path. For SELECTORs it is also", "*args, **kwargs): super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _is_possible_start(cls, token):", "a selector must be processed. So do the following actions:", "\"\"\" Args: expression_token_list: token_pos(int): Location where parsing is \"\"\" self.token_list", "node and the AttributeValue nodes \"\"\" path, self.token_pos = UpdateExpressionPathParser(", "\"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionDeleteActionParser @classmethod def", "def _nestable_class(cls): return UpdateExpressionSetActions class UpdateExpressionSetActionParser(ExpressionParser): \"\"\" SetAction => Path", "it is never part of an attribute name but always", "Expression value clause that is surrounded by round brackets. 
Each", "_parse(self): for factory in self._sub_factories(): if factory.is_possible_start(self.get_next_token()): node, self.token_pos =", "store more strict restrictions on how they are represented in", "token_type: token_value = self.get_next_token_value() self.goto_next_significant_token() return token_value else: self.raise_unexpected_token() def", "**self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionDeleteClause(children=[ast]) @classmethod def _is_possible_start(cls,", "UpdateExpressionOperandParser.is_possible_start(token) def _operand_factory_class(self): return UpdateExpressionOperandParser def _binop_factory_class(self): return UpdateExpressionValueOperatorParser class", "pos=self.token_pos, fc=factory_class.__class__.__name__ ) ) # noinspection PyProtectedMember ast, token_pos =", "None if no more next token \"\"\" try: return self.token_list[self.token_pos]", "the Expression as produced by the factory. \"\"\" assert len(self.target_clauses)", "more next token \"\"\" try: return self.get_next_token().type except AttributeError: return", "UpdateExpressionOperandParser def _binop_factory_class(self): return UpdateExpressionValueOperatorParser class UpdateExpressionGroupedValueParser(ExpressionParser): \"\"\" A grouped", "identified by the next token.\".format( class_name=cls._nestable_class().__name__ ) ) @classmethod @abstractmethod", "def _parse(self): return self.process_path() def process_path(self): self.parse_path() return UpdateExpressionPath(children=self.path_nodes) def", "\"\"\"REMOVE is not a keyword\"\"\" return token.type == Token.ATTRIBUTE and", "return token.type in cls.OPERATION_TOKENS def _parse(self): operation_value = self.get_next_token_value() assert", "=> AttributeValue Operand* => UpdateExpressionFunction Operand* => Path Operand* =>", "E.g.: [1] These are used to select an element in", "get_last_token_value(self): \"\"\"Get the last token that was correctly parsed or", "`token_type` 
if not raise unexpected token Args: token_type: A token", "SetActions one by one until no more SetAction. \"\"\" self.skip_white_space()", "self._parse_target_clause(factory_class) def _parse_by_a_subfactory(self): for sub_factory in self._sub_factories(): if sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory)", "the parsing\"\"\" return self.token_pos == 0 def get_last_token_value(self): \"\"\"Get the", "more strict restrictions on how they are represented in UpdateExpression's.", "to refer to attributes that have a name that is", "Get the class of the Node that will be created", "+= 1 self.skip_white_space() def raise_unexpected_token(self): if self.is_at_end(): problematic_token = \"<EOF>\"", "be a function Args: token(Token): the token to check Returns:", "self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token() return self._create_node() @abstractmethod def _operand_factory_class(self): \"\"\" Get", "for example UpdateExpressionValue's grammar: Value => Operand* Value => Operand*", "last one was whitespace or return empty string\"\"\" if self.token_pos", "UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionRemoveActionParser @classmethod", "for the factory type. Returns: moto.dynamodb2.ast_nodes.Node: AST which is root", "return UpdateExpressionOperandParser.is_possible_start(token) def _operand_factory_class(self): return UpdateExpressionOperandParser def _binop_factory_class(self): return UpdateExpressionValueOperatorParser", "ast, self.token_pos = UpdateExpressionRemoveActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return", "def get_next_token_value(self): \"\"\" Get the value of the next token", "the calling class. See ExpressionParser for an example. 
Returns: dict:", "if len(self.target_nodes) == 1: return UpdateExpressionValue(children=[self.target_nodes.popleft()]) else: target_node = UpdateExpressionValue(", "entries processed by `cls` \"\"\" def _parse_with_pos(self): \"\"\" Start parsing", "get_last_token_type(self): \"\"\"Get the last token type that was correctly parsed", "self.is_next_token_start_of_patch_chain(): self.process_dot() self.parse_path_chain() def is_next_token_start_of_patch_chain(self): return self.get_next_token_type() == Token.DOT def", "= \"<EOF>\" problematic_token_in_near = \"\" else: problematic_token_in_near = problematic_token =", "raise InvalidUpdateExpression(function_name) self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements = [function_name] function_arguments = self.FUNCTIONS[function_name]", "number cannot be split up with spaces Attributes and attribute_names", "special characters except leading # to refer to attributes that", "self._parse_by_a_subfactory(): continue else: self.raise_unexpected_token() return self._create_node() @classmethod def make(cls, expression_str):", "query part that creates the nested nodes\"\"\" def _parse(self): \"\"\"", "a RemoveAction. 
So we should be aggressive on raising invalid", "UpdateExpressionValue(children=[self.target_nodes.popleft()]) else: target_node = UpdateExpressionValue( children=[ self.target_nodes.popleft(), self.target_nodes.popleft(), self.target_nodes.popleft(), ]", "no more next token \"\"\" try: return self.get_next_token().value except AttributeError:", "nepc=self._nested_expression_parser_class().__name__, ) ) self.raise_unexpected_token() return self._create_node() class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions", "logging.debug(\"We are out of range so end is reached\") def", "self.get_next_token_type() == Token.DOT def process_dot(self): self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token() def parse_path_chain(self): self.process_attribute_identifying_token()", "UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return", "bool: Whether the token could be the start of an", "Process path 2) skip whitespace if there are any 3)", "by `cls` \"\"\" def _parse_with_pos(self): \"\"\" Start parsing the token_list", "the docstring this would be UpdateExpression Returns: class: The class", "UpdateExpressionPathParser.is_possible_start(token) def _parse(self): \"\"\" UpdateExpressionSetActionParser only gets called when expecting", "**kwargs) self.target_nodes = deque() def _parse_target_clause(self, factory_class): \"\"\" Args: factory_class:", "_parse_by_a_subfactory(self): for sub_factory in self._sub_factories(): if sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory) return True", "resulting abstract syntax tree \"\"\" @classmethod def is_possible_start(cls, token): return", "If we consider it of structure NestableBinExpression => TargetClause* NestableBinExpression", "True if token is the start of a 
function. \"\"\"", "self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _is_possible_start(cls, token): raise RuntimeError( \"{class_name}", "DynamoDB does not impose much restrictions on the data it", "whitespace or return empty string\"\"\" if self.get_following_token_type() == Token.WHITESPACE: try:", "and token.value.upper() != \"REMOVE\": \"\"\"We have to make sure remove", "UpdateExpressionFunctionParser(ExpressionParser): \"\"\" A helper to process a function of an", "processed Returns: str: value or None if no more next", "target_node = UpdateExpressionValue( children=[ self.target_nodes.popleft(), self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) while", "self.goto_next_significant_token() return token_value else: self.raise_unexpected_token() def goto_next_significant_token(self): \"\"\"Continue past current", "+ 2].value except IndexError: return \"\" else: return \"\" def", "Returns: class: The class of the Nodes that will be", "self.token_list[self.token_pos + 1].type except IndexError: return None def get_2nd_following_token_value_if_following_was_whitespace(self): \"\"\"Get", "NestableBinExpression => TargetClause* NestableBinExpression => TargetClause* BinOp NestableBinExpression This pattern", "only gets called when expecting a RemoveAction. So we should", "Left child Path and right child Value. 
\"\"\" @classmethod def", "for its elements FUNCTIONS = { \"if_not_exists\": [ UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser,", "to have whitespaces between brackets and numbers but the number", "function_elements.append(func_elem) if i + 1 < len(function_arguments): self.skip_white_space() self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET)", "UpdateExpressionRemoveAction(children=[path]) class UpdateExpressionAddClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos", "return token.type == Token.ATTRIBUTE_VALUE class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def _parse(self): if UpdateExpressionAttributeValueParser.is_possible_start(", "= UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() else: token, self.token_pos = UpdateExpressionPathParser( **self._initializer_args()", "+ Value Value => Operand* - Value If we consider", "[path, value]: A list containing the Path node and the", "= UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return UpdateExpressionRemoveAction(children=[path]) class UpdateExpressionAddClauseParser(ExpressionParser): def", "self.is_at_end(): logging.debug(\"End reached\") break elif self._parse_by_a_subfactory(): continue else: self.raise_unexpected_token() return", "for the target clause e.g. UpdateExpressionSetClauseParser Returns: \"\"\" # noinspection", "The class of the Nodes that will be created. 
\"\"\"", "docstring this would be UpdateExpression Returns: class: The class of", "return UpdateExpressionSetActions class UpdateExpressionSetActionParser(ExpressionParser): \"\"\" SetAction => Path = Value", "\"\"\" Get the next token to be processed Returns: moto.dynamodb2.tokens.Token:", "cls._is_possible_start(token) @classmethod @abstractmethod def _is_possible_start(cls, token): \"\"\" Args: token(moto.dynamodb2.tokens.Token): Returns:", "None if no more next token \"\"\" try: return self.get_next_token().type", "be followed by others. Process SetActions one by one until", "=> UpdateExpressionClause* UpdateExpression If we consider it of structure NestableExpression", "IndexError: return None def get_2nd_following_token_value_if_following_was_whitespace(self): \"\"\"Get the 2nd following token", "self._sub_factories(): if sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory) return True return False def _parse(self):", "pattern comes back multiple times. This Mixin adds re-usability for", "whether we are at end of the parsing\"\"\" return self.token_pos", "elif token.type == Token.ATTRIBUTE and token.value.upper() != \"REMOVE\": \"\"\"We have", "Process path 2) skip whitespace if there are any \"\"\"", "selector is only called when a selector must be processed.", "expecting a SetAction. So we should be aggressive on raising", "produced by the factory. \"\"\" if len(self.target_nodes) == 1: return", "\"\"\" UpdateExpressionSetClause => SET SetActions \"\"\" @classmethod def _is_possible_start(cls, token):", "self.get_next_token().type except AttributeError: return None def get_next_token(self): \"\"\" Get the", "the following actions: - skip opening bracket - skip optional", "IndexError: assert self.token_pos > 0, \"We should always have positive", "to check Returns: bool: True if token is the start", "a part within an Item. 
DynamoDB does not impose much", "any \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return", "that can be nested in themselves (recursive) but with an", "\"We should always have positive indexes\" logging.debug(\"We are out of", "def get_2nd_following_token_value_if_following_was_whitespace(self): \"\"\"Get the 2nd following token that was correctly", "Returns: str: Token type or None if no more next", "sure the next token is of type `token_type` if not", "assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionRemoveActionsParser( **self._initializer_args() )._parse_with_pos() #", "UpdateExpressionPath: \"\"\" self.parse_path_chain() while self.is_next_token_start_of_patch_chain(): self.process_dot() self.parse_path_chain() def is_next_token_start_of_patch_chain(self): return", "= expression_token_list self.token_pos = token_pos def _initializer_args(self): return {\"expression_token_list\": self.token_list,", "thus do the following: 1) Process path 2) skip whitespace", "be nested in themselves (recursive). 
Take for example UpdateExpression's grammar:", "of resulting abstract syntax tree and token_pos is the position", "example UpdateExpression's grammar: UpdateExpression => UpdateExpressionClause* UpdateExpression => UpdateExpressionClause* UpdateExpression", "UpdateExpressionClause* UpdateExpression => UpdateExpressionClause* UpdateExpression If we consider it of", "is an Update Expression value clause that is surrounded by", "self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token() def parse_path_chain(self): self.process_attribute_identifying_token() self.skip_white_space() while self.is_next_token_start_of_selector(): self.process_selector() self.skip_white_space()", "\"\"\" Get the Parser class of the Operands for the", "return True elif token.type == Token.ATTRIBUTE and token.value.upper() != \"REMOVE\":", "by itself. \"\"\" def _parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value, self.token_pos = UpdateExpressionValueParser(", "to be a function Args: token(Token): the token to check", "specify a part within an Item. DynamoDB does not impose", "=> UpdateExpressionFunction Operand* => Path Operand* => GroupedValue \"\"\" @classmethod", "Continuing the example of an UpdateExpressionValue: For example value =>", "AttributeValue nodes \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space()", "items to specify a part within an Item. DynamoDB does", "by the calling class. See ExpressionParser for an example. Returns:", "== Token.WHITESPACE: return self.token_list[self.token_pos - 2].value else: return \"\" def", "with an operation. 
Take for example UpdateExpressionValue's grammar: Value =>", "= factory_class( **self._initializer_args() )._parse_with_pos() self.target_nodes.append(ast) logging.debug( \"Continue where previous parsing", "that was correctly parsed if last one was whitespace or", "read numeric literal - skip optional spaces - pass closing", "where parsing is \"\"\" self.token_list = expression_token_list self.token_pos = token_pos", "class: A class extending ExpressionParser \"\"\" def _create_node(self): \"\"\" target_clauses", "self.goto_next_significant_token() return ExpressionValueOperator(operation_value) class UpdateExpressionOperandParser(ExpressionParser): \"\"\" Grammar Operand* => AttributeValue", "whitespace if there are any \"\"\" path, self.token_pos = UpdateExpressionPathParser(", "a factory that gets the possible binary operation. Returns: class:", "] ) assert len(self.target_nodes) == 0 return target_node class UpdateExpressionParser(ExpressionParser,", "\"\"\" UpdateExpressionSetActions is inside the expression so it can be", "( UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue,", "parsing ended {token_pos}\".format( token_pos=self.token_pos ) ) def _parse(self): self._parse_target_clause(self._operand_factory_class()) while", "token_value = self.get_next_token_value() self.goto_next_significant_token() return token_value else: self.raise_unexpected_token() def goto_next_significant_token(self):", "is also allowed to have whitespaces between brackets and numbers", "self.FUNCTIONS.keys(): # Function names are case sensitive raise InvalidUpdateExpression(function_name) self.goto_next_significant_token()", "return empty string\"\"\" if self.get_following_token_type() == Token.WHITESPACE: try: return self.token_list[self.token_pos", "while 
self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class()) if self._operand_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token() return self._create_node()", "in an UpdateExpression) - DOT's: These are used to decent", "Token.MINUS_SIGN] @classmethod def _is_possible_start(cls, token): return token.type in cls.OPERATION_TOKENS def", "UpdateExpressionValue: For example value => a + :val - :val2", "if non existent.\"\"\" try: return self.token_list[self.token_pos + 1].type except IndexError:", "self.goto_next_significant_token() def parse_path_chain(self): self.process_attribute_identifying_token() self.skip_white_space() while self.is_next_token_start_of_selector(): self.process_selector() self.skip_white_space() def", "while len(self.target_nodes) >= 2: target_node = UpdateExpressionValue( children=[ target_node, self.target_nodes.popleft(),", ">> :val >> - >> :val2 ) Returns: moto.dynamodb2.ast_nodes.Node: Node", "selector must be processed. So do the following actions: -", "no more next token \"\"\" try: return self.token_list[self.token_pos] except IndexError:", "len(self.target_nodes) == 1: return UpdateExpressionValue(children=[self.target_nodes.popleft()]) else: target_node = UpdateExpressionValue( children=[", "the expression. \"\"\" while True: self.skip_white_space() if self.is_at_end(): logging.debug(\"End reached\")", "Attributes and attribute_names must be separated with DOT's. Returns: UpdateExpressionPath:", "UpdateExpressionValue's grammar: Value => Operand* Value => Operand* + Value", "the start of an UpdateExpressionPath \"\"\" if token.type == Token.ATTRIBUTE_NAME:", "after the one that is being parsed or None if", "Get the arguments of the initializer. This is implemented by", "factory_class: The factory for the target clause e.g. 
UpdateExpressionSetClauseParser Returns:", "near) class NestableBinExpressionParser(ExpressionParser): \"\"\" For nodes that can be nested", "== Token.ATTRIBUTE and token.value.upper() == \"REMOVE\" class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions", "2].value else: return \"\" def get_following_token_value(self): \"\"\"Get the token value", "@classmethod def _is_possible_start(cls, token): return token.type == Token.OPEN_ROUND_BRACKET class UpdateExpressionValueOperatorParser(ExpressionParser):", "=> SET SetActions \"\"\" @classmethod def _is_possible_start(cls, token): return token.type", "to create update expressions \"\"\" @classmethod def _sub_factories(cls): return [", "@classmethod def _nested_expression_parser_class(cls): return UpdateExpressionSetActionParser @classmethod def _nestable_class(cls): return UpdateExpressionSetActions", "the Binary operations/actions. Returns: class: \"\"\" @abstractmethod def _binop_factory_class(self): \"\"\"", "class NestableBinExpressionParser(ExpressionParser): \"\"\" For nodes that can be nested in", "UpdateExpression Returns: class: The class of the Nodes that will", "Args: token(moto.dynamodb2.tokens.Token): Returns: bool: True if token is a possible", "that was correctly parsed or return empty string\"\"\" if self.token_pos", "@classmethod def _is_possible_start(cls, token): return UpdateExpressionOperandParser.is_possible_start(token) def _operand_factory_class(self): return UpdateExpressionOperandParser", "to descent into a MAP. We will call each descend", "be created. 
\"\"\" def _create_node(self): \"\"\" target_clauses has the nodes", "while self.is_next_token_start_of_selector(): self.process_selector() self.skip_white_space() def process_attribute_identifying_token(self): if self.get_next_token_type() == Token.ATTRIBUTE:", "return \"\" def get_following_token_type(self): \"\"\"Get the token type after the", "skip optional spaces - pass closing bracket \"\"\" self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value", "of the parsing\"\"\" return self.token_pos == len(self.token_list) def is_at_start(self): \"\"\"Return", "UpdateExpressionAttributeValueOrPathParser, ], \"list_append\": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], } @classmethod def _is_possible_start(cls, token):", "self.skip_white_space() return UpdateExpressionRemoveAction(children=[path]) class UpdateExpressionAddClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token()", "assert self.token_pos > 0, \"We should always have positive indexes\"", "a keyword\"\"\" return token.type == Token.ATTRIBUTE and token.value.upper() == \"REMOVE\"", "= UpdateExpressionDeleteActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionDeleteClause(children=[ast]) @classmethod", "so end is reached\") def process_token_of_type(self, token_type): \"\"\" Maker sure", ")._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionDeleteClause(children=[ast]) @classmethod def _is_possible_start(cls, token):", "NestableExpression => TargetClause* NestableExpression => TargetClause* NestableExpression This pattern comes", "else: self.raise_unexpected_token() self.goto_next_significant_token() def is_next_token_start_of_selector(self): return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET def", "BinOp Operand / | | | | UpdateExpressionValue BinOp Operand", "value, self.token_pos = 
UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionGroupedValue(children=value) @classmethod", "factory that gets the possible binary operation. Returns: class: A", "token \"\"\" try: return self.get_next_token().type except AttributeError: return None def", "the start of a function. \"\"\" if token.type == Token.ATTRIBUTE:", "if self.token_pos > 0: return self.token_list[self.token_pos - 1].value else: return", "\"\"\" For nodes that can be nested in themselves (recursive)", "SetAction => Path = Value So we create an UpdateExpressionSetAction", "in self._sub_factories(): if factory.is_possible_start(self.get_next_token()): node, self.token_pos = factory( **self._initializer_args() )._parse_with_pos()", "skip whitespace if there are any Returns: [path, value]: A", "return False def _parse(self): \"\"\" Update Expression is the top-most", "token type that was correctly parsed or return None\"\"\" if", "Operand* => GroupedValue \"\"\" @classmethod def _sub_factories(cls): return [ UpdateExpressionAttributeValueParser,", "_operand_factory_class(self): return UpdateExpressionOperandParser def _binop_factory_class(self): return UpdateExpressionValueOperatorParser class UpdateExpressionGroupedValueParser(ExpressionParser): \"\"\"", "UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space() value, self.token_pos = UpdateExpressionValueParser(", "class UpdateExpressionAddClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos =", "token.\".format( class_name=cls._nestable_class().__name__ ) ) @classmethod @abstractmethod def _nestable_class(cls): return UpdateExpressionSetActions", "self._parse() def get_next_token_type(self): \"\"\" Get the type of the next", "- 
SELECTORs: E.g.: [1] These are used to select an", "split up with spaces Attributes and attribute_names must be separated", "actions: - skip opening bracket - skip optional spaces -", "self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return [path, value] class", "{fc}\".format( pos=self.token_pos, fc=factory_class.__class__.__name__ ) ) # noinspection PyProtectedMember ast, token_pos", "return token @classmethod def _is_possible_start(cls, token): return any( [ UpdateExpressionAttributeValueParser.is_possible_start(token),", "_sub_factories(cls): return [ UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser, UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser, ] @classmethod def", "backwards and build the tree bottom up. This way left-deep-descending", "more next token \"\"\" try: return self.get_next_token().value except AttributeError: return", "token_type): \"\"\" Maker sure the next token is of type", "Value => Operand* Value => Operand* + Value Value =>", "in UpdateExpression's. \"\"\" def __init__(self, *args, **kwargs): super(UpdateExpressionPathParser, self).__init__(*args, **kwargs)", "return token.value in cls.FUNCTIONS.keys() else: return False def _parse(self): function_name", "\"\"\"Get the token type after the one that is being", "class UpdateExpressionSetClauseParser(ExpressionParser): \"\"\" UpdateExpressionSetClause => SET SetActions \"\"\" @classmethod def", "parsing with specific factory class {fc}\".format( pos=self.token_pos, fc=factory_class.__class__.__name__ ) )", "structure. When a DOT is in a path expression it", "This is implemented by the calling class. See ExpressionParser for", "SetActions \"\"\" @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE", "factory type. 
Returns: moto.dynamodb2.ast_nodes.Node: AST which is root node of", "after the one that is being parsed or empty string", "Value => Operand* - Value If we consider it of", "\"\"\" if self.get_next_token_type() == token_type: token_value = self.get_next_token_value() self.goto_next_significant_token() return", "case sensitive raise InvalidUpdateExpression(function_name) self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements = [function_name] function_arguments", "build the tree bottom up. This way left-deep-descending traversal will", "the one that is being parsed or empty string if", "attribute name but always means to descent into a MAP.", "checked Returns: bool: Whether the token could be the start", "return self.token_list[self.token_pos + 1].type except IndexError: return None def get_2nd_following_token_value_if_following_was_whitespace(self):", "0, \"We should always have positive indexes\" logging.debug(\"We are out", "to specify a part within an Item. DynamoDB does not", "skip whitespace if there are any 3) Process value \"\"\"", "type of the next token to be processed Returns: str:", "from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer", "a path expression it is never part of an attribute", "logging from abc import abstractmethod import abc import six from", "by the factory. \"\"\" assert len(self.target_clauses) > 0, \"No nodes", "assert len(self.target_clauses) > 0, \"No nodes for {cn}\".format( cn=self.__class__.__name__ )", "Update Expression is the top-most node therefore it is expected", "return the resulting token_pos. 
Returns: (ast, token_pos): tuple of AST", "UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta) class UpdateExpressionPathValueParser(ExpressionParser): def _parse_path_and_value(self): \"\"\" UpdateExpressionAddActionParser only gets", "This Mixin adds re-usability for that type of pattern. This", "end of the parsing\"\"\" return self.token_pos == len(self.token_list) def is_at_start(self):", "*args, **kwargs): super(NestableBinExpressionParser, self).__init__(*args, **kwargs) self.target_nodes = deque() def _parse_target_clause(self,", "parser for the query part that creates the nested nodes\"\"\"", "except IndexError: return None def get_next_token_value(self): \"\"\" Get the value", "type and also return the resulting token_pos. Returns: (ast, token_pos):", "to make sure remove is not passed\"\"\" return True return", "for parser in cls._sub_factories()) def _parse(self): for factory in self._sub_factories():", "the next token to be processed Returns: moto.dynamodb2.tokens.Token: or None", "= UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return [path, value] class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser):", "of the initializer. 
This is implemented by the calling class.", "much restrictions on the data it stores but it does", "UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() else: token, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos()", "and the AttributeValue nodes \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args()", "_create_node(self): \"\"\" target_clauses has the nodes in order of encountering.", "[ UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser, UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser, ] @classmethod def _is_possible_start(cls, token):", "return UpdateExpression def _parse_expression_clause(self, factory_class): return self._parse_target_clause(factory_class) def _parse_by_a_subfactory(self): for", "== len(self.token_list) def is_at_start(self): \"\"\"Return boolean indicating whether we are", ") ) @classmethod @abstractmethod def _nestable_class(cls): return UpdateExpressionSetActions @classmethod @abstractmethod", "until no more SetAction. \"\"\" self.skip_white_space() while self._nested_expression_parser_class().is_possible_start( self.get_next_token() ):", "has no special characters - ATTRIBUTE_NAME: A placeholder that has", "0: return self.token_list[self.token_pos - 1].value else: return \"\" def get_last_token_type(self):", ") return target_node @six.add_metaclass(abc.ABCMeta) class ExpressionParser: \"\"\"Abstract class\"\"\" def __init__(self,", "self._parse_target_clause(self._binop_factory_class()) if self._operand_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token() return self._create_node() @abstractmethod def", "_is_possible_start(cls, token): raise RuntimeError( \"{class_name} cannot be identified by the", "NestableExpression This pattern comes back multiple times. 
This Mixin adds", "@abstractmethod def _nested_expression_parser_class(cls): \"\"\"Returns the parser for the query part", "factory for the target clause e.g. UpdateExpressionSetClauseParser Returns: \"\"\" logging.debug(", "[function_name] function_arguments = self.FUNCTIONS[function_name] for i, func_elem_factory in enumerate(function_arguments): func_elem,", "self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionAddActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember", "return UpdateExpressionRemoveActions class UpdateExpressionRemoveActionParser(ExpressionParser): \"\"\" RemoveAction => Path = Value", "factory in self._sub_factories(): if factory.is_possible_start(self.get_next_token()): node, self.token_pos = factory( **self._initializer_args()", "is_at_end(self): \"\"\"Return boolean indicating whether we are at end of", "abc import six from collections import deque from moto.dynamodb2.parsing.ast_nodes import", ">> - >> :val2 ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an", "is of type `token_type` if not raise unexpected token Args:", "return UpdateExpressionAddAction(children=self._parse_path_and_value()) class UpdateExpressionDeleteClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast,", "do the following actions: - skip opening bracket - skip", "NestableBinExpression This pattern comes back multiple times. This Mixin adds", "NestableBinExpression => TargetClause* BinOp NestableBinExpression This pattern comes back multiple", "the token value after the one that is being parsed", "self.get_next_token_type() == Token.WHITESPACE: self.token_pos += 1 except IndexError: assert self.token_pos", "Value => Operand* + Value Value => Operand* - Value", "**kwargs) self.path_nodes = [] @classmethod def _is_possible_start(cls, token): \"\"\" Args:", "and attribute_names must be separated with DOT's. 
Returns: UpdateExpressionPath: \"\"\"", "], \"list_append\": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], } @classmethod def _is_possible_start(cls, token): \"\"\"", "_parse(self): return UpdateExpressionAddAction(children=self._parse_path_and_value()) class UpdateExpressionDeleteClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token()", "return self.token_pos == len(self.token_list) def is_at_start(self): \"\"\"Return boolean indicating whether", "next token \"\"\" try: return self.token_list[self.token_pos] except IndexError: return None", "initializer. This is implemented by the calling class. See ExpressionParser", "Token.OPEN_ROUND_BRACKET class UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN] @classmethod def _is_possible_start(cls,", "continue parsing with specific factory class {fc}\".format( pos=self.token_pos, fc=factory_class.__class__.__name__ )", "def _parse(self): \"\"\" UpdateExpressionRemoveActionParser only gets called when expecting a", "_parse_target_clause(self, factory_class): \"\"\" Args: factory_class: The factory for the target", "**self._initializer_args() )._parse_with_pos() self.skip_white_space() return UpdateExpressionRemoveAction(children=[path]) class UpdateExpressionAddClauseParser(ExpressionParser): def _parse(self): assert", "a=3 REMOVE b UpdateExpression / \\ SET a=3 UpdateExpression |", "node of resulting abstract syntax tree and token_pos is the", "self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionSetActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember", "== Token.OPEN_SQUARE_BRACKET def process_selector(self): \"\"\" Process the selector is only", "by round brackets. 
Each Operand can be a grouped value", "ExpressionValueOperator(operation_value) class UpdateExpressionOperandParser(ExpressionParser): \"\"\" Grammar Operand* => AttributeValue Operand* =>", "the initializer. This is implemented by the calling class. See", "self.token_pos} @abstractmethod def _parse(self): \"\"\" Start parsing the token_list from", "token.type == Token.ATTRIBUTE and token.value.upper() == \"SET\" def _parse(self): assert", "_nestable_class(cls): return UpdateExpressionRemoveActions class UpdateExpressionRemoveActionParser(ExpressionParser): \"\"\" RemoveAction => Path =", "self._create_node() class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): \"\"\" UpdateExpressionSetActions \"\"\" @classmethod def _nested_expression_parser_class(cls): return", "nested nodes\"\"\" def _parse(self): \"\"\" UpdateExpressionSetActions is inside the expression", "whether we are at start of the parsing\"\"\" return self.token_pos", "self.get_next_token() ): self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space() if self.get_next_token_type() == Token.COMMA: self.goto_next_significant_token() else:", "not impose much restrictions on the data it stores but", "self.FUNCTIONS[function_name] for i, func_elem_factory in enumerate(function_arguments): func_elem, self.token_pos = func_elem_factory(", "cannot be split up with spaces Attributes and attribute_names must", "self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space() if self.get_next_token_type() == Token.COMMA: self.goto_next_significant_token() else: break if", "is root node of resulting abstract syntax tree \"\"\" @classmethod", "Token.ATTRIBUTE: return token.value in cls.FUNCTIONS.keys() else: return False def _parse(self):", "GroupedValue \"\"\" @classmethod def _sub_factories(cls): return [ UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser, UpdateExpressionPathParser,", "2nd last token that was correctly 
parsed if last one", "Operand* Value => Operand* + Value Value => Operand* -", "ordering of the Nodes as how the corresponding tokens where", "\"\"\"We have to make sure remove is not passed\"\"\" return", "from token_pos for the factory type. Returns: moto.dynamodb2.ast_nodes.Node: AST which", "_parse_expression_clause(self, factory_class): return self._parse_target_clause(factory_class) def _parse_by_a_subfactory(self): for sub_factory in self._sub_factories():", "3) Process value \"\"\" path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos()", ")._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionGroupedValue(children=value) @classmethod def _is_possible_start(cls, token): return token.type", "So we should be aggressive on raising invalid Tokens. We", "if no more next token \"\"\" try: return self.token_list[self.token_pos] except", "] @classmethod def _is_possible_start(cls, token): return any(parser.is_possible_start(token) for parser in", "try: return self.token_list[self.token_pos + 2].value except IndexError: return \"\" else:", "SetAction. 
So we should be aggressive on raising invalid Tokens.", "inside the expression so it can be followed by others.", "function_arguments = self.FUNCTIONS[function_name] for i, func_elem_factory in enumerate(function_arguments): func_elem, self.token_pos", "token_pos @abstractmethod def _initializer_args(self): \"\"\" Get the arguments of the", "value]: A list containing the Path node and the AttributeValue", "get_next_token(self): \"\"\" Get the next token to be processed Returns:", ") while len(self.target_nodes) >= 2: target_node = UpdateExpressionValue( children=[ target_node,", "class UpdateExpressionFunctionParser(ExpressionParser): \"\"\" A helper to process a function of", "Update Expression \"\"\" # Map function to the factories for", "= factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast) logging.debug( \"Continue where previous parsing ended {token_pos}\".format(", "expression. \"\"\" while True: self.skip_white_space() if self.is_at_end(): logging.debug(\"End reached\") break", "@classmethod def _nestable_class(cls): return UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta) class UpdateExpressionPathValueParser(ExpressionParser): def _parse_path_and_value(self):", "def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return UpdateExpressionAddAction(children=self._parse_path_and_value()) class", "between all these elements that build a path. 
For SELECTORs", "# to refer to attributes that have a name that", "def _parse(self): return UpdateExpressionAddAction(children=self._parse_path_and_value()) class UpdateExpressionDeleteClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token())", "self.token_pos == 0 def get_last_token_value(self): \"\"\"Get the last token that", "def get_following_token_value(self): \"\"\"Get the token value after the one that", "pos {pos} to continue parsing with specific factory class {fc}\".format(", "2].value except IndexError: return \"\" else: return \"\" def skip_white_space(self):", "node self.raise_unexpected_token() class UpdateExpressionAttributeValueParser(ExpressionParser): def _parse(self): attr_value = ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE)", "if self.get_next_token_type() == Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif self.get_next_token_type() == Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value()))", "ast, self.token_pos = UpdateExpressionSetActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return", "characters - ATTRIBUTE_NAME: A placeholder that has no special characters", "self.target_clauses.append(ast) logging.debug( \"Continue where previous parsing ended {token_pos}\".format( token_pos=token_pos )", "by one until no more SetAction. \"\"\" self.skip_white_space() while self._nested_expression_parser_class().is_possible_start(", "is only called when a selector must be processed. So", "be aggressive on raising invalid Tokens. We can thus do" ]
[ "Constraints LatticeOpt LatticeOpt LatticeOpt LatticeOpt FixAngles FixAngles FixAngles FixAngles FixLengths", "row = 0 self[\"optimization frame\"].grid(row=row, column=1, sticky=tk.EW) row += 1", "self[\"optimization method\"] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row += 1", "w = self[widget] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row +=", "MaxSteps MaxSteps MaxSteps OutputPrefix OutputPrefix OutputPrefix OutputPrefix AppendGeometries AppendGeometries AppendGeometries", "w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif \"LBFGS\" in", "import tkinter as tk import tkinter.ttk as ttk import dftbplus_step", "ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly StepSize Alpha Memory Generations LineSearch \"\"\" #", "= self[\"optimization frame\"] for slave in frame.grid_slaves(): slave.grid_forget() method =", "add our dialog...\"\"\" super().right_click(event) self.popup_menu.add_command(label=\"Edit..\", command=self.edit) self.popup_menu.tk_popup(event.x_root, event.y_root, 0) def", "widget in ( \"MaxForceComponent\", \"MaxSteps\", \"MaxAtomStep\", \"stop_if_scc_fails\", ): w =", "# And the widgets in our frame self.reset_optimization_frame() return row", "self.results_widgets = [] super().__init__( tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w,", "noqa: E501 frame = self[\"optimization frame\"] for slave in frame.grid_slaves():", "calculation=\"optimization\" ): \"\"\"Create the dialog!\"\"\" self.logger.debug(\"Creating the dialog\") super().create_dialog(title=title, calculation=calculation)", "row += 1 if method == \"Steepest descents\": w =", "row += 1 for widget in ( \"MaxForceComponent\", \"MaxSteps\", \"MaxAtomStep\",", "MaxSteps MaxSteps OutputPrefix OutputPrefix OutputPrefix OutputPrefix AppendGeometries AppendGeometries AppendGeometries AppendGeometries", "MaxAtomStep MaxAtomStep MaxAtomStep MaxLatticeStep MaxLatticeStep 
MaxLatticeStep MaxLatticeStep ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly", "TkOptimization(dftbplus_step.TkEnergy): def __init__( self, tk_flowchart=None, node=None, canvas=None, x=120, y=20, w=200,", "= self[\"Alpha\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 w", "the dialog\") super().create_dialog(title=title, calculation=calculation) # Create all the widgets P", "h=50, my_logger=logger, keyword_metadata=None, ): \"\"\"Initialize the graphical Tk DFTB+ optimization", "method == \"Steepest descents\": w = self[\"StepSize\"] w.grid(row=row, column=1, sticky=tk.EW)", "Optimization node\"\"\" import logging import tkinter as tk import tkinter.ttk", "0 self[\"optimization frame\"].grid(row=row, column=1, sticky=tk.EW) row += 1 # And", "import tkinter.ttk as ttk import dftbplus_step logger = logging.getLogger(__name__) class", "def create_dialog( self, title=\"Edit DFTB+ Optimization Step\", calculation=\"optimization\" ): \"\"\"Create", "in dftbplus_step.OptimizationParameters.parameters: self[key] = P[key].widget(opt_frame) self.logger.debug(\"Finished creating the dialog\") def", "tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w, h=h, my_logger=my_logger, keyword_metadata=keyword_metadata, )", "ConvergentForcesOnly ConvergentForcesOnly StepSize Alpha Memory Generations LineSearch \"\"\" # noqa:", "\"Steepest descents\": w = self[\"StepSize\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row", "CG gDIIS LBFGS FIRE ------------------ ------------------- ------------------- ------------------- -------- MovedAtoms", "------------------- ------------------- ------------------- -------- MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep MaxForceComponent", "tkinter as tk import tkinter.ttk as ttk import dftbplus_step logger", "our frame self.reset_optimization_frame() return row def reset_optimization_frame(self): \"\"\"Layout the optimization", "calculation=calculation) # Create all the 
widgets P = self.node.parameters #", "borderwidth=4, relief=\"sunken\", text=\"Optimization Parameters\", labelanchor=\"n\", padding=10, ) for key in", "Isotropic Isotropic Isotropic Pressure Pressure Pressure Pressure MaxAtomStep MaxAtomStep MaxAtomStep", "-*- coding: utf-8 -*- \"\"\"The graphical part of a DFTB+", "self[widget] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row += 1 return", "+= 1 elif \"gDIIS\" in method: w = self[\"Alpha\"] w.grid(row=row,", "step Keyword arguments: \"\"\" self.results_widgets = [] super().__init__( tk_flowchart=tk_flowchart, node=node,", "optimization step Keyword arguments: \"\"\" self.results_widgets = [] super().__init__( tk_flowchart=tk_flowchart,", "Frame to isolate widgets opt_frame = self[\"optimization frame\"] = ttk.LabelFrame(", "MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly StepSize Alpha", "= self[\"Memory\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 w", "------------------ ------------------- ------------------- ------------------- -------- MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep", "w=200, h=50, my_logger=logger, keyword_metadata=None, ): \"\"\"Initialize the graphical Tk DFTB+", "= self[\"optimization method\"].get() widgets = [] widgets1 = [] row", "AppendGeometries AppendGeometries Constraints Constraints Constraints Constraints LatticeOpt LatticeOpt LatticeOpt LatticeOpt", "keyword_metadata=keyword_metadata, ) def right_click(self, event): \"\"\"Probably need to add our", "sticky=tk.EW) widgets1.append(w) row += 1 elif \"gDIIS\" in method: w", "need to add our dialog...\"\"\" super().right_click(event) self.popup_menu.add_command(label=\"Edit..\", command=self.edit) self.popup_menu.tk_popup(event.x_root, event.y_root,", "+= 1 if method == \"Steepest descents\": w = self[\"StepSize\"]", "FixAngles FixLengths Isotropic Isotropic Isotropic Isotropic 
Pressure Pressure Pressure Pressure", "MaxAtomStep MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly StepSize", "Optimization Step\", calculation=\"optimization\" ): \"\"\"Create the dialog!\"\"\" self.logger.debug(\"Creating the dialog\")", "slave.grid_forget() method = self[\"optimization method\"].get() widgets = [] widgets1 =", "row = 0 w = self[\"optimization method\"] w.grid(row=row, column=0, columnspan=2,", "LatticeOpt FixAngles FixAngles FixAngles FixAngles FixLengths Isotropic Isotropic Isotropic Isotropic", "def __init__( self, tk_flowchart=None, node=None, canvas=None, x=120, y=20, w=200, h=50,", "Isotropic Isotropic Pressure Pressure Pressure Pressure MaxAtomStep MaxAtomStep MaxAtomStep MaxLatticeStep", "\"gDIIS\" in method: w = self[\"Alpha\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w)", "super().reset_dialog() row = 0 self[\"optimization frame\"].grid(row=row, column=1, sticky=tk.EW) row +=", "method\"].get() widgets = [] widgets1 = [] row = 0", "MaxLatticeStep ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly StepSize Alpha Memory Generations LineSearch", "keyword_metadata=None, ): \"\"\"Initialize the graphical Tk DFTB+ optimization step Keyword", "widgets opt_frame = self[\"optimization frame\"] = ttk.LabelFrame( self[\"frame\"], borderwidth=4, relief=\"sunken\",", "+= 1 # And the widgets in our frame self.reset_optimization_frame()", "to the current values. 
SD CG gDIIS LBFGS FIRE ------------------", "self[\"optimization frame\"] for slave in frame.grid_slaves(): slave.grid_forget() method = self[\"optimization", "\"MaxForceComponent\", \"MaxSteps\", \"MaxAtomStep\", \"stop_if_scc_fails\", ): w = self[widget] w.grid(row=row, column=0,", "self.logger.debug(\"Finished creating the dialog\") def reset_dialog(self, widget=None): super().reset_dialog() row =", ") for key in dftbplus_step.OptimizationParameters.parameters: self[key] = P[key].widget(opt_frame) self.logger.debug(\"Finished creating", "w = self[\"LineSearch\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1", "reset_dialog(self, widget=None): super().reset_dialog() row = 0 self[\"optimization frame\"].grid(row=row, column=1, sticky=tk.EW)", "self[\"frame\"], borderwidth=4, relief=\"sunken\", text=\"Optimization Parameters\", labelanchor=\"n\", padding=10, ) for key", "super().__init__( tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w, h=h, my_logger=my_logger, keyword_metadata=keyword_metadata,", "self[key] = P[key].widget(opt_frame) self.logger.debug(\"Finished creating the dialog\") def reset_dialog(self, widget=None):", "LatticeOpt LatticeOpt FixAngles FixAngles FixAngles FixAngles FixLengths Isotropic Isotropic Isotropic", "FixAngles FixAngles FixAngles FixLengths Isotropic Isotropic Isotropic Isotropic Pressure Pressure", "the widgets in our frame self.reset_optimization_frame() return row def reset_optimization_frame(self):", "1 for widget in ( \"MaxForceComponent\", \"MaxSteps\", \"MaxAtomStep\", \"stop_if_scc_fails\", ):", "isolate widgets opt_frame = self[\"optimization frame\"] = ttk.LabelFrame( self[\"frame\"], borderwidth=4,", "( \"MaxForceComponent\", \"MaxSteps\", \"MaxAtomStep\", \"stop_if_scc_fails\", ): w = self[widget] w.grid(row=row,", "as tk import tkinter.ttk as ttk import dftbplus_step logger =", "def reset_dialog(self, widget=None): super().reset_dialog() row = 0 self[\"optimization 
frame\"].grid(row=row, column=1,", "node=node, canvas=canvas, x=x, y=y, w=w, h=h, my_logger=my_logger, keyword_metadata=keyword_metadata, ) def", "arguments: \"\"\" self.results_widgets = [] super().__init__( tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x,", "MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent MaxSteps", "self[\"Memory\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 w =", "\"MaxSteps\", \"MaxAtomStep\", \"stop_if_scc_fails\", ): w = self[widget] w.grid(row=row, column=0, columnspan=2,", "------------------- ------------------- -------- MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep MaxForceComponent MaxForceComponent", "= [] row = 0 w = self[\"optimization method\"] w.grid(row=row,", "ttk import dftbplus_step logger = logging.getLogger(__name__) class TkOptimization(dftbplus_step.TkEnergy): def __init__(", "LatticeOpt LatticeOpt LatticeOpt FixAngles FixAngles FixAngles FixAngles FixLengths Isotropic Isotropic", "1 # And the widgets in our frame self.reset_optimization_frame() return", "method: w = self[\"Alpha\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row +=", "[] super().__init__( tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w, h=h, my_logger=my_logger,", "self[\"Alpha\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 w =", "FIRE ------------------ ------------------- ------------------- ------------------- -------- MovedAtoms MovedAtoms MovedAtoms MovedAtoms", "): \"\"\"Create the dialog!\"\"\" self.logger.debug(\"Creating the dialog\") super().create_dialog(title=title, calculation=calculation) #", "the graphical Tk DFTB+ optimization step Keyword arguments: \"\"\" self.results_widgets", "widgets1.append(w) row += 1 w = self[\"LineSearch\"] w.grid(row=row, column=1, sticky=tk.EW)", "MaxAtomStep MaxAtomStep MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep 
ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly", "method\"] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row += 1 if", "E501 frame = self[\"optimization frame\"] for slave in frame.grid_slaves(): slave.grid_forget()", "x=x, y=y, w=w, h=h, my_logger=my_logger, keyword_metadata=keyword_metadata, ) def right_click(self, event):", "== \"Steepest descents\": w = self[\"StepSize\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w)", "Create all the widgets P = self.node.parameters # Frame to", "Isotropic Isotropic Isotropic Isotropic Pressure Pressure Pressure Pressure MaxAtomStep MaxAtomStep", "self.reset_optimization_frame() return row def reset_optimization_frame(self): \"\"\"Layout the optimization frame according", "canvas=canvas, x=x, y=y, w=w, h=h, my_logger=my_logger, keyword_metadata=keyword_metadata, ) def right_click(self,", "y=y, w=w, h=h, my_logger=my_logger, keyword_metadata=keyword_metadata, ) def right_click(self, event): \"\"\"Probably", "self.popup_menu.add_command(label=\"Edit..\", command=self.edit) self.popup_menu.tk_popup(event.x_root, event.y_root, 0) def create_dialog( self, title=\"Edit DFTB+", "widgets1.append(w) row += 1 w = self[\"Generations\"] w.grid(row=row, column=1, sticky=tk.EW)", "row += 1 elif \"gDIIS\" in method: w = self[\"Alpha\"]", "event.y_root, 0) def create_dialog( self, title=\"Edit DFTB+ Optimization Step\", calculation=\"optimization\"", "ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly StepSize Alpha Memory Generations LineSearch \"\"\"", "the widgets P = self.node.parameters # Frame to isolate widgets", "dialog!\"\"\" self.logger.debug(\"Creating the dialog\") super().create_dialog(title=title, calculation=calculation) # Create all the", "= self[\"optimization method\"] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row +=", "w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) 
widgets.append(w) row += 1 if method", "# Frame to isolate widgets opt_frame = self[\"optimization frame\"] =", "Constraints Constraints Constraints LatticeOpt LatticeOpt LatticeOpt LatticeOpt FixAngles FixAngles FixAngles", "the current values. SD CG gDIIS LBFGS FIRE ------------------ -------------------", "for widget in ( \"MaxForceComponent\", \"MaxSteps\", \"MaxAtomStep\", \"stop_if_scc_fails\", ): w", "\"\"\"Layout the optimization frame according to the current values. SD", "-------- MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent", "as ttk import dftbplus_step logger = logging.getLogger(__name__) class TkOptimization(dftbplus_step.TkEnergy): def", "self, tk_flowchart=None, node=None, canvas=None, x=120, y=20, w=200, h=50, my_logger=logger, keyword_metadata=None,", "node\"\"\" import logging import tkinter as tk import tkinter.ttk as", "for slave in frame.grid_slaves(): slave.grid_forget() method = self[\"optimization method\"].get() widgets", "= logging.getLogger(__name__) class TkOptimization(dftbplus_step.TkEnergy): def __init__( self, tk_flowchart=None, node=None, canvas=None,", "of a DFTB+ Optimization node\"\"\" import logging import tkinter as", "self.node.parameters # Frame to isolate widgets opt_frame = self[\"optimization frame\"]", "self[\"optimization frame\"].grid(row=row, column=1, sticky=tk.EW) row += 1 # And the", "StepSize Alpha Memory Generations LineSearch \"\"\" # noqa: E501 frame", "self.logger.debug(\"Creating the dialog\") super().create_dialog(title=title, calculation=calculation) # Create all the widgets", "canvas=None, x=120, y=20, w=200, h=50, my_logger=logger, keyword_metadata=None, ): \"\"\"Initialize the", "self.popup_menu.tk_popup(event.x_root, event.y_root, 0) def create_dialog( self, title=\"Edit DFTB+ Optimization Step\",", "self[\"optimization method\"].get() widgets = [] widgets1 = [] row =", "sticky=tk.EW) widgets1.append(w) row += 1 for widget 
in ( \"MaxForceComponent\",", "the dialog\") def reset_dialog(self, widget=None): super().reset_dialog() row = 0 self[\"optimization", "creating the dialog\") def reset_dialog(self, widget=None): super().reset_dialog() row = 0", "MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent MaxSteps MaxSteps MaxSteps MaxSteps OutputPrefix OutputPrefix", "columnspan=2, sticky=tk.EW) widgets.append(w) row += 1 if method == \"Steepest", "Keyword arguments: \"\"\" self.results_widgets = [] super().__init__( tk_flowchart=tk_flowchart, node=node, canvas=canvas,", "w=w, h=h, my_logger=my_logger, keyword_metadata=keyword_metadata, ) def right_click(self, event): \"\"\"Probably need", "-*- \"\"\"The graphical part of a DFTB+ Optimization node\"\"\" import", "coding: utf-8 -*- \"\"\"The graphical part of a DFTB+ Optimization", "MaxSteps MaxSteps MaxSteps MaxSteps OutputPrefix OutputPrefix OutputPrefix OutputPrefix AppendGeometries AppendGeometries", "And the widgets in our frame self.reset_optimization_frame() return row def", "reset_optimization_frame(self): \"\"\"Layout the optimization frame according to the current values.", "= [] super().__init__( tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w, h=h,", "w = self[\"optimization method\"] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row", "frame according to the current values. 
SD CG gDIIS LBFGS", "# noqa: E501 frame = self[\"optimization frame\"] for slave in", "1 elif \"gDIIS\" in method: w = self[\"Alpha\"] w.grid(row=row, column=1,", "= self[widget] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row += 1", "logging.getLogger(__name__) class TkOptimization(dftbplus_step.TkEnergy): def __init__( self, tk_flowchart=None, node=None, canvas=None, x=120,", "Memory Generations LineSearch \"\"\" # noqa: E501 frame = self[\"optimization", "1 w = self[\"LineSearch\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row +=", "= 0 self[\"optimization frame\"].grid(row=row, column=1, sticky=tk.EW) row += 1 #", "widgets1 = [] row = 0 w = self[\"optimization method\"]", "self[\"Generations\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif \"LBFGS\"", "tk import tkinter.ttk as ttk import dftbplus_step logger = logging.getLogger(__name__)", "import dftbplus_step logger = logging.getLogger(__name__) class TkOptimization(dftbplus_step.TkEnergy): def __init__( self,", "= self[\"LineSearch\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 for", "frame\"].grid(row=row, column=1, sticky=tk.EW) row += 1 # And the widgets", "w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 for widget in", "import logging import tkinter as tk import tkinter.ttk as ttk", "w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 w = self[\"Generations\"]", "elif \"LBFGS\" in method: w = self[\"Memory\"] w.grid(row=row, column=1, sticky=tk.EW)", "part of a DFTB+ Optimization node\"\"\" import logging import tkinter", "x=120, y=20, w=200, h=50, my_logger=logger, keyword_metadata=None, ): \"\"\"Initialize the graphical", "Generations LineSearch \"\"\" # noqa: E501 frame = self[\"optimization frame\"]", "utf-8 -*- \"\"\"The graphical part of a DFTB+ Optimization node\"\"\"", "w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 w = self[\"LineSearch\"]", 
"\"\"\"The graphical part of a DFTB+ Optimization node\"\"\" import logging", "widgets1.append(w) row += 1 for widget in ( \"MaxForceComponent\", \"MaxSteps\",", "key in dftbplus_step.OptimizationParameters.parameters: self[key] = P[key].widget(opt_frame) self.logger.debug(\"Finished creating the dialog\")", "the optimization frame according to the current values. SD CG", "FixAngles FixAngles FixAngles FixAngles FixLengths Isotropic Isotropic Isotropic Isotropic Pressure", "0) def create_dialog( self, title=\"Edit DFTB+ Optimization Step\", calculation=\"optimization\" ):", "gDIIS LBFGS FIRE ------------------ ------------------- ------------------- ------------------- -------- MovedAtoms MovedAtoms", "column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row += 1 if method ==", "method: w = self[\"Memory\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row +=", "in method: w = self[\"Alpha\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row", "create_dialog( self, title=\"Edit DFTB+ Optimization Step\", calculation=\"optimization\" ): \"\"\"Create the", "0 w = self[\"optimization method\"] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w)", "sticky=tk.EW) widgets1.append(w) row += 1 elif \"LBFGS\" in method: w", "SD CG gDIIS LBFGS FIRE ------------------ ------------------- ------------------- ------------------- --------", "sticky=tk.EW) widgets1.append(w) row += 1 w = self[\"Generations\"] w.grid(row=row, column=1,", "command=self.edit) self.popup_menu.tk_popup(event.x_root, event.y_root, 0) def create_dialog( self, title=\"Edit DFTB+ Optimization", "Constraints Constraints LatticeOpt LatticeOpt LatticeOpt LatticeOpt FixAngles FixAngles FixAngles FixAngles", "super().right_click(event) self.popup_menu.add_command(label=\"Edit..\", command=self.edit) self.popup_menu.tk_popup(event.x_root, event.y_root, 0) def create_dialog( self, title=\"Edit", "in our frame self.reset_optimization_frame() return row def 
reset_optimization_frame(self): \"\"\"Layout the", "column=1, sticky=tk.EW) widgets1.append(w) row += 1 for widget in (", "OutputPrefix OutputPrefix AppendGeometries AppendGeometries AppendGeometries AppendGeometries Constraints Constraints Constraints Constraints", "event): \"\"\"Probably need to add our dialog...\"\"\" super().right_click(event) self.popup_menu.add_command(label=\"Edit..\", command=self.edit)", "dialog...\"\"\" super().right_click(event) self.popup_menu.add_command(label=\"Edit..\", command=self.edit) self.popup_menu.tk_popup(event.x_root, event.y_root, 0) def create_dialog( self,", "in method: w = self[\"Memory\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row", "in ( \"MaxForceComponent\", \"MaxSteps\", \"MaxAtomStep\", \"stop_if_scc_fails\", ): w = self[widget]", "all the widgets P = self.node.parameters # Frame to isolate", "[] widgets1 = [] row = 0 w = self[\"optimization", "slave in frame.grid_slaves(): slave.grid_forget() method = self[\"optimization method\"].get() widgets =", "dialog\") def reset_dialog(self, widget=None): super().reset_dialog() row = 0 self[\"optimization frame\"].grid(row=row,", "row += 1 w = self[\"LineSearch\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w)", "column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif \"LBFGS\" in method:", "MovedAtoms MovedAtoms MovedAtoms TimeStep MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent MaxSteps MaxSteps", "= [] widgets1 = [] row = 0 w =", "LatticeOpt LatticeOpt LatticeOpt LatticeOpt FixAngles FixAngles FixAngles FixAngles FixLengths Isotropic", "dialog\") super().create_dialog(title=title, calculation=calculation) # Create all the widgets P =", "AppendGeometries Constraints Constraints Constraints Constraints LatticeOpt LatticeOpt LatticeOpt LatticeOpt FixAngles", "frame\"] for slave in frame.grid_slaves(): slave.grid_forget() method = self[\"optimization method\"].get()", "+= 1 w = self[\"Generations\"] w.grid(row=row, 
column=1, sticky=tk.EW) widgets1.append(w) row", "frame\"] = ttk.LabelFrame( self[\"frame\"], borderwidth=4, relief=\"sunken\", text=\"Optimization Parameters\", labelanchor=\"n\", padding=10,", "AppendGeometries AppendGeometries AppendGeometries AppendGeometries Constraints Constraints Constraints Constraints LatticeOpt LatticeOpt", "class TkOptimization(dftbplus_step.TkEnergy): def __init__( self, tk_flowchart=None, node=None, canvas=None, x=120, y=20,", "descents\": w = self[\"StepSize\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row +=", "Step\", calculation=\"optimization\" ): \"\"\"Create the dialog!\"\"\" self.logger.debug(\"Creating the dialog\") super().create_dialog(title=title,", "Alpha Memory Generations LineSearch \"\"\" # noqa: E501 frame =", "return row def reset_optimization_frame(self): \"\"\"Layout the optimization frame according to", "P[key].widget(opt_frame) self.logger.debug(\"Finished creating the dialog\") def reset_dialog(self, widget=None): super().reset_dialog() row", "FixLengths Isotropic Isotropic Isotropic Isotropic Pressure Pressure Pressure Pressure MaxAtomStep", "super().create_dialog(title=title, calculation=calculation) # Create all the widgets P = self.node.parameters", "+= 1 w = self[\"LineSearch\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row", "+= 1 elif \"LBFGS\" in method: w = self[\"Memory\"] w.grid(row=row,", "elif \"gDIIS\" in method: w = self[\"Alpha\"] w.grid(row=row, column=1, sticky=tk.EW)", "self[\"LineSearch\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 for widget", "= self[\"Generations\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif", "padding=10, ) for key in dftbplus_step.OptimizationParameters.parameters: self[key] = P[key].widget(opt_frame) self.logger.debug(\"Finished", "relief=\"sunken\", text=\"Optimization Parameters\", labelanchor=\"n\", padding=10, ) for key in dftbplus_step.OptimizationParameters.parameters:", 
"MaxLatticeStep MaxLatticeStep MaxLatticeStep ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly StepSize Alpha Memory", "dftbplus_step.OptimizationParameters.parameters: self[key] = P[key].widget(opt_frame) self.logger.debug(\"Finished creating the dialog\") def reset_dialog(self,", "values. SD CG gDIIS LBFGS FIRE ------------------ ------------------- ------------------- -------------------", "MovedAtoms MovedAtoms TimeStep MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent MaxSteps MaxSteps MaxSteps", "widgets = [] widgets1 = [] row = 0 w", "w = self[\"Generations\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1", "ConvergentForcesOnly StepSize Alpha Memory Generations LineSearch \"\"\" # noqa: E501", "Pressure Pressure Pressure MaxAtomStep MaxAtomStep MaxAtomStep MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep", "Tk DFTB+ optimization step Keyword arguments: \"\"\" self.results_widgets = []", "FixAngles FixAngles FixLengths Isotropic Isotropic Isotropic Isotropic Pressure Pressure Pressure", "sticky=tk.EW) widgets1.append(w) row += 1 w = self[\"LineSearch\"] w.grid(row=row, column=1,", "for key in dftbplus_step.OptimizationParameters.parameters: self[key] = P[key].widget(opt_frame) self.logger.debug(\"Finished creating the", "): \"\"\"Initialize the graphical Tk DFTB+ optimization step Keyword arguments:", "column=1, sticky=tk.EW) widgets1.append(w) row += 1 w = self[\"LineSearch\"] w.grid(row=row,", "\"MaxAtomStep\", \"stop_if_scc_fails\", ): w = self[widget] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW)", "1 w = self[\"Generations\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row +=", "frame = self[\"optimization frame\"] for slave in frame.grid_slaves(): slave.grid_forget() method", "title=\"Edit DFTB+ Optimization Step\", calculation=\"optimization\" ): \"\"\"Create the dialog!\"\"\" self.logger.debug(\"Creating", "Pressure MaxAtomStep MaxAtomStep 
MaxAtomStep MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep ConvergentForcesOnly ConvergentForcesOnly", "row += 1 elif \"LBFGS\" in method: w = self[\"Memory\"]", "P = self.node.parameters # Frame to isolate widgets opt_frame =", "OutputPrefix OutputPrefix OutputPrefix OutputPrefix AppendGeometries AppendGeometries AppendGeometries AppendGeometries Constraints Constraints", "# -*- coding: utf-8 -*- \"\"\"The graphical part of a", "__init__( self, tk_flowchart=None, node=None, canvas=None, x=120, y=20, w=200, h=50, my_logger=logger,", "logging import tkinter as tk import tkinter.ttk as ttk import", ") def right_click(self, event): \"\"\"Probably need to add our dialog...\"\"\"", "column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif \"gDIIS\" in method:", "row def reset_optimization_frame(self): \"\"\"Layout the optimization frame according to the", "= self[\"optimization frame\"] = ttk.LabelFrame( self[\"frame\"], borderwidth=4, relief=\"sunken\", text=\"Optimization Parameters\",", "opt_frame = self[\"optimization frame\"] = ttk.LabelFrame( self[\"frame\"], borderwidth=4, relief=\"sunken\", text=\"Optimization", "= P[key].widget(opt_frame) self.logger.debug(\"Finished creating the dialog\") def reset_dialog(self, widget=None): super().reset_dialog()", "according to the current values. 
SD CG gDIIS LBFGS FIRE", "row += 1 w = self[\"Generations\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w)", "\"\"\"Initialize the graphical Tk DFTB+ optimization step Keyword arguments: \"\"\"", "MaxForceComponent MaxForceComponent MaxForceComponent MaxSteps MaxSteps MaxSteps MaxSteps OutputPrefix OutputPrefix OutputPrefix", "graphical Tk DFTB+ optimization step Keyword arguments: \"\"\" self.results_widgets =", "widget=None): super().reset_dialog() row = 0 self[\"optimization frame\"].grid(row=row, column=1, sticky=tk.EW) row", "Pressure Pressure MaxAtomStep MaxAtomStep MaxAtomStep MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep ConvergentForcesOnly", "\"\"\" # noqa: E501 frame = self[\"optimization frame\"] for slave", "right_click(self, event): \"\"\"Probably need to add our dialog...\"\"\" super().right_click(event) self.popup_menu.add_command(label=\"Edit..\",", "): w = self[widget] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row", "MovedAtoms TimeStep MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent MaxSteps MaxSteps MaxSteps MaxSteps", "logger = logging.getLogger(__name__) class TkOptimization(dftbplus_step.TkEnergy): def __init__( self, tk_flowchart=None, node=None,", "my_logger=logger, keyword_metadata=None, ): \"\"\"Initialize the graphical Tk DFTB+ optimization step", "# Create all the widgets P = self.node.parameters # Frame", "1 if method == \"Steepest descents\": w = self[\"StepSize\"] w.grid(row=row,", "w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row += 1 return row", "the dialog!\"\"\" self.logger.debug(\"Creating the dialog\") super().create_dialog(title=title, calculation=calculation) # Create all", "= self.node.parameters # Frame to isolate widgets opt_frame = self[\"optimization", "\"stop_if_scc_fails\", ): w = self[widget] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w)", "text=\"Optimization Parameters\", 
labelanchor=\"n\", padding=10, ) for key in dftbplus_step.OptimizationParameters.parameters: self[key]", "column=1, sticky=tk.EW) row += 1 # And the widgets in", "node=None, canvas=None, x=120, y=20, w=200, h=50, my_logger=logger, keyword_metadata=None, ): \"\"\"Initialize", "column=1, sticky=tk.EW) widgets1.append(w) row += 1 w = self[\"Generations\"] w.grid(row=row,", "tkinter.ttk as ttk import dftbplus_step logger = logging.getLogger(__name__) class TkOptimization(dftbplus_step.TkEnergy):", "OutputPrefix AppendGeometries AppendGeometries AppendGeometries AppendGeometries Constraints Constraints Constraints Constraints LatticeOpt", "AppendGeometries AppendGeometries AppendGeometries Constraints Constraints Constraints Constraints LatticeOpt LatticeOpt LatticeOpt", "= ttk.LabelFrame( self[\"frame\"], borderwidth=4, relief=\"sunken\", text=\"Optimization Parameters\", labelanchor=\"n\", padding=10, )", "\"LBFGS\" in method: w = self[\"Memory\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w)", "dftbplus_step logger = logging.getLogger(__name__) class TkOptimization(dftbplus_step.TkEnergy): def __init__( self, tk_flowchart=None,", "sticky=tk.EW) row += 1 # And the widgets in our", "DFTB+ optimization step Keyword arguments: \"\"\" self.results_widgets = [] super().__init__(", "Pressure Pressure Pressure Pressure MaxAtomStep MaxAtomStep MaxAtomStep MaxLatticeStep MaxLatticeStep MaxLatticeStep", "w = self[\"StepSize\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1", "w = self[\"Alpha\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1", "frame.grid_slaves(): slave.grid_forget() method = self[\"optimization method\"].get() widgets = [] widgets1", "row += 1 # And the widgets in our frame", "my_logger=my_logger, keyword_metadata=keyword_metadata, ) def right_click(self, event): \"\"\"Probably need to add", "\"\"\"Probably need to add our dialog...\"\"\" super().right_click(event) 
self.popup_menu.add_command(label=\"Edit..\", command=self.edit) self.popup_menu.tk_popup(event.x_root,", "method = self[\"optimization method\"].get() widgets = [] widgets1 = []", "MaxForceComponent MaxSteps MaxSteps MaxSteps MaxSteps OutputPrefix OutputPrefix OutputPrefix OutputPrefix AppendGeometries", "to isolate widgets opt_frame = self[\"optimization frame\"] = ttk.LabelFrame( self[\"frame\"],", "our dialog...\"\"\" super().right_click(event) self.popup_menu.add_command(label=\"Edit..\", command=self.edit) self.popup_menu.tk_popup(event.x_root, event.y_root, 0) def create_dialog(", "widgets1.append(w) row += 1 elif \"gDIIS\" in method: w =", "graphical part of a DFTB+ Optimization node\"\"\" import logging import", "[] row = 0 w = self[\"optimization method\"] w.grid(row=row, column=0,", "1 elif \"LBFGS\" in method: w = self[\"Memory\"] w.grid(row=row, column=1,", "tk_flowchart=None, node=None, canvas=None, x=120, y=20, w=200, h=50, my_logger=logger, keyword_metadata=None, ):", "= self[\"StepSize\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif", "in frame.grid_slaves(): slave.grid_forget() method = self[\"optimization method\"].get() widgets = []", "widgets P = self.node.parameters # Frame to isolate widgets opt_frame", "if method == \"Steepest descents\": w = self[\"StepSize\"] w.grid(row=row, column=1,", "MaxForceComponent MaxForceComponent MaxSteps MaxSteps MaxSteps MaxSteps OutputPrefix OutputPrefix OutputPrefix OutputPrefix", "labelanchor=\"n\", padding=10, ) for key in dftbplus_step.OptimizationParameters.parameters: self[key] = P[key].widget(opt_frame)", "def right_click(self, event): \"\"\"Probably need to add our dialog...\"\"\" super().right_click(event)", "to add our dialog...\"\"\" super().right_click(event) self.popup_menu.add_command(label=\"Edit..\", command=self.edit) self.popup_menu.tk_popup(event.x_root, event.y_root, 0)", "a DFTB+ Optimization node\"\"\" import logging import tkinter as tk", "w = self[\"Memory\"] 
w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1", "LBFGS FIRE ------------------ ------------------- ------------------- ------------------- -------- MovedAtoms MovedAtoms MovedAtoms", "widgets1.append(w) row += 1 elif \"LBFGS\" in method: w =", "LineSearch \"\"\" # noqa: E501 frame = self[\"optimization frame\"] for", "current values. SD CG gDIIS LBFGS FIRE ------------------ ------------------- -------------------", "optimization frame according to the current values. SD CG gDIIS", "Parameters\", labelanchor=\"n\", padding=10, ) for key in dftbplus_step.OptimizationParameters.parameters: self[key] =", "DFTB+ Optimization node\"\"\" import logging import tkinter as tk import", "MaxLatticeStep MaxLatticeStep ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly StepSize Alpha Memory Generations", "------------------- -------- MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep MaxForceComponent MaxForceComponent MaxForceComponent", "MaxSteps OutputPrefix OutputPrefix OutputPrefix OutputPrefix AppendGeometries AppendGeometries AppendGeometries AppendGeometries Constraints", "sticky=tk.EW) widgets.append(w) row += 1 if method == \"Steepest descents\":", "Constraints Constraints Constraints Constraints LatticeOpt LatticeOpt LatticeOpt LatticeOpt FixAngles FixAngles", "w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif \"gDIIS\" in", "def reset_optimization_frame(self): \"\"\"Layout the optimization frame according to the current", "DFTB+ Optimization Step\", calculation=\"optimization\" ): \"\"\"Create the dialog!\"\"\" self.logger.debug(\"Creating the", "+= 1 for widget in ( \"MaxForceComponent\", \"MaxSteps\", \"MaxAtomStep\", \"stop_if_scc_fails\",", "TimeStep MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent MaxSteps MaxSteps MaxSteps MaxSteps OutputPrefix", "frame self.reset_optimization_frame() return row def reset_optimization_frame(self): \"\"\"Layout the 
optimization frame", "h=h, my_logger=my_logger, keyword_metadata=keyword_metadata, ) def right_click(self, event): \"\"\"Probably need to", "self[\"optimization frame\"] = ttk.LabelFrame( self[\"frame\"], borderwidth=4, relief=\"sunken\", text=\"Optimization Parameters\", labelanchor=\"n\",", "self, title=\"Edit DFTB+ Optimization Step\", calculation=\"optimization\" ): \"\"\"Create the dialog!\"\"\"", "widgets in our frame self.reset_optimization_frame() return row def reset_optimization_frame(self): \"\"\"Layout", "y=20, w=200, h=50, my_logger=logger, keyword_metadata=None, ): \"\"\"Initialize the graphical Tk", "\"\"\"Create the dialog!\"\"\" self.logger.debug(\"Creating the dialog\") super().create_dialog(title=title, calculation=calculation) # Create", "self[\"StepSize\"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif \"gDIIS\"", "= 0 w = self[\"optimization method\"] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW)", "ttk.LabelFrame( self[\"frame\"], borderwidth=4, relief=\"sunken\", text=\"Optimization Parameters\", labelanchor=\"n\", padding=10, ) for", "Isotropic Pressure Pressure Pressure Pressure MaxAtomStep MaxAtomStep MaxAtomStep MaxLatticeStep MaxLatticeStep", "\"\"\" self.results_widgets = [] super().__init__( tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y,", "OutputPrefix OutputPrefix OutputPrefix AppendGeometries AppendGeometries AppendGeometries AppendGeometries Constraints Constraints Constraints", "widgets.append(w) row += 1 if method == \"Steepest descents\": w" ]
[ "python3 # -*- coding: utf-8 -*- \"\"\"Módulo de configuração dos", "gerando a cada envio um número inteiro referente a quantidade", "quantidade de bytes a serem recebidos Returns: (str) mensagem decifrada", "str): msg = msg.encode('utf-8') msg = self.publickey.encrypt(msg, 3.14159265359) msg =", "msg = self.decrypt(self.sock.recv(b)) return msg.decode('utf-8') def encrypt(self, msg): \"\"\"Criptografia de", "envio sequencial de segmentos de um arquivo através de um", "file.write(nxt) yield rcvd file.close() def __repr__(self): return \"{0}({1}, {2}, key_file", "Classe base para os terminais de cliente e servidor. Attributes:", "sock (socket): socket de comunicação key_file (str): arquivo para inicialização", "método controla o envio sequencial de segmentos de um arquivo", "enviados até o momento. Método deve ser usado como um", "início da comunicação Ao se conectarem, servidor e cliente trocam", "Example: for b in receive_file(filename): print(str(b) + \" de \"", "\"\"\"Método receive recebe mensagens simples através do socket É através", "Returns: (bytes) segmento de bytes criptografados \"\"\" if isinstance(msg, str):", "envio um número inteiro referente a quantidade de bytes enviados", "str(file_size) \"bytes enviados\") Args: filename (str): endereço do arquivo Yields:", "else: private_key = RSA.importKey(keyfile.read()) keyfile.close() finally: public_key = private_key.publickey().exportKey() return", "par de chaves \"\"\" self.sock = kwargs.get('sock', socket.socket(socket.AF_INET, socket.SOCK_STREAM)) key_file", "\" de \" str(filesize) \" bytes recebidos.\") Args: filename(str): nome", "um gerador. 
Example: for b in receive_file(filename): print(str(b) + \"", "key = RSA.importKey(k) return key def send(self, msg): \"\"\"Método send", "chave privada e prepara, também, a chave pública para envio.", "endereço do arquivo da chave privada Returns: (tuple) uma tupla", "self.sock.recv(1024) rcvd += len(nxt) file.write(nxt) yield rcvd file.close() def __repr__(self):", "rcvd file.close() def __repr__(self): return \"{0}({1}, {2}, key_file = {3})\".format(self.__class__.__name__,", "e byte (inicializador da chave pública) \"\"\" try: keyfile =", "False caso contrário \"\"\" def __init__(self, **kwargs): \"\"\"Método construtor do", "público a partir da chave pública recebida através de um", "controla o comportamento do objeto como um todo. Todo o", "= open(filename, 'rb') while sent < size: ack = self.receive()", "mensagem decifrada \"\"\" msg = self.decrypt(self.sock.recv(b)) return msg.decode('utf-8') def encrypt(self,", "bytes a serem criptografados. Returns: (bytes) segmento de bytes criptografados", "socket. O método gera a quantidade de bytes recebidos a", "coding: utf-8 -*- \"\"\"Módulo de configuração dos consoles \"\"\" from", "raise NotImplemented @staticmethod def start_key(key_file): \"\"\"Método de inicialização das chaves", "ser enviada \"\"\" msg = self.encrypt(msg) self.sock.send(msg) def receive(self, b", "recebida do socket, por tanto, deve ser usado como um", "gerador. Example: for b in receive_file(filename): print(str(b) + \" de", "def receive_key(self): \"\"\"Troca de chaves no início da comunicação Ao", "o usuário tenha realizado o login com sucesso, False caso", "for b in receive_file(filename): print(str(b) + \" de \" str(filesize)", "através de sockets Esse método controla o recebeimendo de sementos", "arquivos através de um socket. O método gera a quantidade", "e o do Client O Método run controla o comportamento", "o do Client O Método run controla o comportamento do", "método receive. 
Args: b (int): quantidade de bytes a serem", "\"bytes enviados\") Args: filename (str): endereço do arquivo Yields: (int)", "if isinstance(msg, str): msg = msg.encode('utf-8') msg = self.publickey.encrypt(msg, 3.14159265359)", "\" str(file_size) \"bytes enviados\") Args: filename (str): endereço do arquivo", "len(nxt) yield sent file.close() def receive_file(self, filename): \"\"\"Rotina de recebimento", "socket, por tanto, deve ser usado como um gerador. Example:", "filename): \"\"\"Rotina de recebimento de arquivos através de sockets Esse", "= self.encrypt(msg) self.sock.send(msg) def receive(self, b = 160): \"\"\"Método receive", "public_key def receive_key(self): \"\"\"Troca de chaves no início da comunicação", "como um gerador. Veja exemplo abaixo. Example: for b in", "método que o usuário recebe mensagens simples através do socket.", "bytes enviados até o momento. Método deve ser usado como", "o recebeimendo de sementos de arquivos através de um socket.", "Args: b (int): quantidade de bytes a serem recebidos Returns:", "self.privatekey.decrypt(msg) return msg def send_file(self, filename): \"\"\"Rotina de envio de", "= msg.encode('utf-8') msg = self.publickey.encrypt(msg, 3.14159265359) msg = base64.a85encode(msg[0]) return", "usuário recebe mensagens simples através do socket. As mensagens chegam", "Args: msg (bytes): trecho de mensagem a ser decifrado Returns:", "socket, gerando a cada envio um número inteiro referente a", "-*- coding: utf-8 -*- \"\"\"Módulo de configuração dos consoles \"\"\"", "criptografadas e a descriptografia acontece dentro do método receive. 
Args:", "de par de chaves \"\"\" self.sock = kwargs.get('sock', socket.socket(socket.AF_INET, socket.SOCK_STREAM))", "um console individual deve ser definido dentro do método run.", "dos consoles \"\"\" from Crypto.PublicKey import RSA import socket import", "self.privatekey, self.publickey = Console.start_key(key_file) def run(self): \"\"\"Método run difere entre", "'rb') while sent < size: ack = self.receive() nxt =", "receive_file(self, filename): \"\"\"Rotina de recebimento de arquivos através de sockets", "servidor. Attributes: logged (bool): True caso o usuário tenha realizado", "private_key = RSA.importKey(keyfile.read()) keyfile.close() finally: public_key = private_key.publickey().exportKey() return private_key,", "bytes decifrados \"\"\" msg = base64.a85decode(msg) msg = self.privatekey.decrypt(msg) return", "return msg.decode('utf-8') def encrypt(self, msg): \"\"\"Criptografia de uma string ou", "bytes enviados ou -1, em caso de erro \"\"\" size", "de chaves \"\"\" self.sock = kwargs.get('sock', socket.socket(socket.AF_INET, socket.SOCK_STREAM)) key_file =", "desse método ocorrem as criptografias RSA e base64 antes do", "de bytes criptografados \"\"\" if isinstance(msg, str): msg = msg.encode('utf-8')", "a serem recebidos Returns: (str) mensagem decifrada \"\"\" msg =", "a cada nova mensagem recebida do socket, por tanto, deve", "ou trecho de bytes Args: msg (str ou bytes): string", "def start_key(key_file): \"\"\"Método de inicialização das chaves Esse método inicializa", "no início da comunicação Ao se conectarem, servidor e cliente", "\"\"\" k = self.sock.recv(1024) key = RSA.importKey(k) return key def", "'wb') rcvd = 0 while rcvd < size: self.send('ack') nxt", "= os.path.getsize(filename) self.send(str(size)) sent = 0 file = open(filename, 'rb')", "uma string ou trecho de bytes Args: msg (str ou", "decifrados \"\"\" msg = base64.a85decode(msg) msg = self.privatekey.decrypt(msg) return msg", "através do socket. 
As mensagens chegam criptografadas e a descriptografia", "Todo o comportamento de um console individual deve ser definido", "e servidor. Attributes: logged (bool): True caso o usuário tenha", "criptografados. Returns: (bytes) segmento de bytes criptografados \"\"\" if isinstance(msg,", "'rb') except FileNotFoundError: private_key = RSA.generate(1024) else: private_key = RSA.importKey(keyfile.read())", "'') if key_file: self.privatekey, self.publickey = Console.start_key(key_file) def run(self): \"\"\"Método", "chaves \"\"\" self.sock = kwargs.get('sock', socket.socket(socket.AF_INET, socket.SOCK_STREAM)) key_file = kwargs.get('key_file',", "segmento de bytes criptografados \"\"\" if isinstance(msg, str): msg =", "(str): arquivo para inicialização de par de chaves \"\"\" self.sock", "do socket. As mensagens chegam criptografadas e a descriptografia acontece", "através de um socket. Returns: (_RSAobj) chave pública para criptografia.", "decifrado Returns: (bytes): trecho de bytes decifrados \"\"\" msg =", "momento. Método deve ser usado como um gerador. Veja exemplo", "serem criptografados. Returns: (bytes) segmento de bytes criptografados \"\"\" if", "deve ser definido dentro do método run. 
\"\"\" raise NotImplemented", "do tipo RSA público a partir da chave pública recebida", "chegam criptografadas e a descriptografia acontece dentro do método receive.", "a quantidade de bytes recebidos a cada nova mensagem recebida", "rcvd < size: self.send('ack') nxt = self.sock.recv(1024) rcvd += len(nxt)", "envio de arquivos através de sockets Esse método controla o", "< size: ack = self.receive() nxt = file.read(1024) self.sock.send(nxt) sent", "+= len(nxt) file.write(nxt) yield rcvd file.close() def __repr__(self): return \"{0}({1},", "socket.socket(socket.AF_INET, socket.SOCK_STREAM)) key_file = kwargs.get('key_file', '') if key_file: self.privatekey, self.publickey", "contendo um par _RSAobj (chave privada) e byte (inicializador da", "de chaves no início da comunicação Ao se conectarem, servidor", "prepara, também, a chave pública para envio. Args: key_file (str):", "chave privada Returns: (tuple) uma tupla contendo um par _RSAobj", "nome do arquivo Yields: (int) quantidade de bytes recebidos \"\"\"", "private_key = RSA.generate(1024) else: private_key = RSA.importKey(keyfile.read()) keyfile.close() finally: public_key", "de erro \"\"\" size = os.path.getsize(filename) self.send(str(size)) sent = 0", "criptografias RSA e base64 antes do envio.\" Args: msg (str", "um socket. 
Dentro desse método ocorrem as criptografias RSA e", "arquivos através de sockets Esse método controla o recebeimendo de", "msg = self.privatekey.decrypt(msg) return msg def send_file(self, filename): \"\"\"Rotina de", "a chave privada e prepara, também, a chave pública para", "arquivo através de um socket, gerando a cada envio um", "(str ou bytes): mensagem a ser enviada \"\"\" msg =", "# -*- coding: utf-8 -*- \"\"\"Módulo de configuração dos consoles", "= self.sock.recv(1024) rcvd += len(nxt) file.write(nxt) yield rcvd file.close() def", "de segmentos de um arquivo através de um socket, gerando", "\"\"\" raise NotImplemented @staticmethod def start_key(key_file): \"\"\"Método de inicialização das", "__init__(self, **kwargs): \"\"\"Método construtor do console Kwargs: sock (socket): socket", "= kwargs.get('sock', socket.socket(socket.AF_INET, socket.SOCK_STREAM)) key_file = kwargs.get('key_file', '') if key_file:", "abaixo. Example: for b in self.sendfile('alice.txt'): if b == -1:", "mensagem a ser decifrado Returns: (bytes): trecho de bytes decifrados", "configuração dos consoles \"\"\" from Crypto.PublicKey import RSA import socket", "receive_key(self): \"\"\"Troca de chaves no início da comunicação Ao se", "apara enviar mensagens simples através de um socket. Dentro desse", "um número inteiro referente a quantidade de bytes enviados até", "\" str(filesize) \" bytes recebidos.\") Args: filename(str): nome do arquivo", "\"\"\" msg = self.decrypt(self.sock.recv(b)) return msg.decode('utf-8') def encrypt(self, msg): \"\"\"Criptografia", "_RSAobj (chave privada) e byte (inicializador da chave pública) \"\"\"", "para criptografia. 
\"\"\" k = self.sock.recv(1024) key = RSA.importKey(k) return", "def send(self, msg): \"\"\"Método send envia strings simples através do", "base64 class Console(object): \"\"\"Superclasse Console Classe base para os terminais", "while rcvd < size: self.send('ack') nxt = self.sock.recv(1024) rcvd +=", "utf-8 -*- \"\"\"Módulo de configuração dos consoles \"\"\" from Crypto.PublicKey", "send envia strings simples através do socket O Método send", "um socket, gerando a cada envio um número inteiro referente", "Example: for b in self.sendfile('alice.txt'): if b == -1: print(\"Houve", "file.close() def receive_file(self, filename): \"\"\"Rotina de recebimento de arquivos através", "Client O Método run controla o comportamento do objeto como", "de recebimento de arquivos através de sockets Esse método controla", "O Método send é o método usado apara enviar mensagens", "chave pública) \"\"\" try: keyfile = open(key_file, 'rb') except FileNotFoundError:", "in self.sendfile('alice.txt'): if b == -1: print(\"Houve um erro na", "gerador. Veja exemplo abaixo. Example: for b in self.sendfile('alice.txt'): if", "de uma string ou trecho de bytes Args: msg (str", "msg = base64.a85encode(msg[0]) return msg def decrypt(self, msg): \"\"\"Método de", "método controla o recebeimendo de sementos de arquivos através de", "quantidade de bytes recebidos a cada nova mensagem recebida do", "de configuração dos consoles \"\"\" from Crypto.PublicKey import RSA import", "simples através do socket. 
As mensagens chegam criptografadas e a", "usuário tenha realizado o login com sucesso, False caso contrário", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\"Módulo de configuração", "= 160): \"\"\"Método receive recebe mensagens simples através do socket", "Returns: (bytes): trecho de bytes decifrados \"\"\" msg = base64.a85decode(msg)", "print(str(b) + \"de \" str(file_size) \"bytes enviados\") Args: filename (str):", "if b == -1: print(\"Houve um erro na transferência\") else:", "mensagens simples através do socket. As mensagens chegam criptografadas e", "FileNotFoundError: private_key = RSA.generate(1024) else: private_key = RSA.importKey(keyfile.read()) keyfile.close() finally:", "(_RSAobj) chave pública para criptografia. \"\"\" k = self.sock.recv(1024) key", "**kwargs): \"\"\"Método construtor do console Kwargs: sock (socket): socket de", "(str ou bytes): string ou bytes a serem criptografados. Returns:", "de bytes enviados ou -1, em caso de erro \"\"\"", "b = 160): \"\"\"Método receive recebe mensagens simples através do", "inicialização de par de chaves \"\"\" self.sock = kwargs.get('sock', socket.socket(socket.AF_INET,", "uma tupla contendo um par _RSAobj (chave privada) e byte", "open(filename, 'wb') rcvd = 0 while rcvd < size: self.send('ack')", "\"\"\"Módulo de configuração dos consoles \"\"\" from Crypto.PublicKey import RSA", "(str): endereço do arquivo Yields: (int) quantidade de bytes enviados", "(inicializador da chave pública) \"\"\" try: keyfile = open(key_file, 'rb')", "dentro do método receive. Args: b (int): quantidade de bytes", "msg = msg.encode('utf-8') msg = self.publickey.encrypt(msg, 3.14159265359) msg = base64.a85encode(msg[0])", "consoles \"\"\" from Crypto.PublicKey import RSA import socket import os", "do Host e o do Client O Método run controla", "def decrypt(self, msg): \"\"\"Método de conversão de um trecho criptografado", "de sementos de arquivos através de um socket. 
O método", "socket de comunicação key_file (str): arquivo para inicialização de par", "b (int): quantidade de bytes a serem recebidos Returns: (str)", "pública) \"\"\" try: keyfile = open(key_file, 'rb') except FileNotFoundError: private_key", "(socket): socket de comunicação key_file (str): arquivo para inicialização de", "\"\"\"Método construtor do console Kwargs: sock (socket): socket de comunicação", "a chave pública para envio. Args: key_file (str): endereço do", "mensagens simples através de um socket. Dentro desse método ocorrem", "Attributes: logged (bool): True caso o usuário tenha realizado o", "sent file.close() def receive_file(self, filename): \"\"\"Rotina de recebimento de arquivos", "(int) quantidade de bytes enviados ou -1, em caso de", "self.encrypt(msg) self.sock.send(msg) def receive(self, b = 160): \"\"\"Método receive recebe", "string ou bytes a serem criptografados. Returns: (bytes) segmento de", "private_key, public_key def receive_key(self): \"\"\"Troca de chaves no início da", "+ \" de \" str(filesize) \" bytes recebidos.\") Args: filename(str):", "do envio.\" Args: msg (str ou bytes): mensagem a ser", "a partir da chave pública recebida através de um socket.", "trecho de bytes Args: msg (str ou bytes): string ou", "(int) quantidade de bytes recebidos \"\"\" size = int(self.receive()) file", "de bytes enviados até o momento. Método deve ser usado", "= base64.a85decode(msg) msg = self.privatekey.decrypt(msg) return msg def send_file(self, filename):", "Método deve ser usado como um gerador. Veja exemplo abaixo.", "dentro do método run. \"\"\" raise NotImplemented @staticmethod def start_key(key_file):", "das chaves Esse método inicializa a chave privada e prepara,", "self.sock.send(nxt) sent += len(nxt) yield sent file.close() def receive_file(self, filename):", "enviada \"\"\" msg = self.encrypt(msg) self.sock.send(msg) def receive(self, b =", "ou bytes): mensagem a ser enviada \"\"\" msg = self.encrypt(msg)", "usado como um gerador. 
Veja exemplo abaixo. Example: for b", "como um gerador. Example: for b in receive_file(filename): print(str(b) +", "kwargs.get('key_file', '') if key_file: self.privatekey, self.publickey = Console.start_key(key_file) def run(self):", "msg.encode('utf-8') msg = self.publickey.encrypt(msg, 3.14159265359) msg = base64.a85encode(msg[0]) return msg", "trecho de bytes decifrados \"\"\" msg = base64.a85decode(msg) msg =", "msg (str ou bytes): string ou bytes a serem criptografados.", "receive_file(filename): print(str(b) + \" de \" str(filesize) \" bytes recebidos.\")", "(str): endereço do arquivo da chave privada Returns: (tuple) uma", "self.publickey = Console.start_key(key_file) def run(self): \"\"\"Método run difere entre o", "arquivo para inicialização de par de chaves \"\"\" self.sock =", "recebe mensagens simples através do socket. As mensagens chegam criptografadas", "0 file = open(filename, 'rb') while sent < size: ack", "também, a chave pública para envio. Args: key_file (str): endereço", "self.sock.send(msg) def receive(self, b = 160): \"\"\"Método receive recebe mensagens", "para os terminais de cliente e servidor. Attributes: logged (bool):", "(bool): True caso o usuário tenha realizado o login com", "NotImplemented @staticmethod def start_key(key_file): \"\"\"Método de inicialização das chaves Esse", "socket. Dentro desse método ocorrem as criptografias RSA e base64", "realizado o login com sucesso, False caso contrário \"\"\" def", "método run. \"\"\" raise NotImplemented @staticmethod def start_key(key_file): \"\"\"Método de", "da chave pública) \"\"\" try: keyfile = open(key_file, 'rb') except", "É através desse método que o usuário recebe mensagens simples", "(bytes): trecho de bytes decifrados \"\"\" msg = base64.a85decode(msg) msg", "pública para envio. 
Args: key_file (str): endereço do arquivo da", "-1, em caso de erro \"\"\" size = os.path.getsize(filename) self.send(str(size))", "bytes a serem recebidos Returns: (str) mensagem decifrada \"\"\" msg", "while sent < size: ack = self.receive() nxt = file.read(1024)", "par _RSAobj (chave privada) e byte (inicializador da chave pública)", "login com sucesso, False caso contrário \"\"\" def __init__(self, **kwargs):", "do arquivo da chave privada Returns: (tuple) uma tupla contendo", "from Crypto.PublicKey import RSA import socket import os import base64", "de mensagem a ser decifrado Returns: (bytes): trecho de bytes", "except FileNotFoundError: private_key = RSA.generate(1024) else: private_key = RSA.importKey(keyfile.read()) keyfile.close()", "key_file (str): endereço do arquivo da chave privada Returns: (tuple)", "<filename>console.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\"Módulo de", "arquivo da chave privada Returns: (tuple) uma tupla contendo um", "decrypt(self, msg): \"\"\"Método de conversão de um trecho criptografado Args:", "send é o método usado apara enviar mensagens simples através", "de um socket, gerando a cada envio um número inteiro", "msg = base64.a85decode(msg) msg = self.privatekey.decrypt(msg) return msg def send_file(self,", "RSA import socket import os import base64 class Console(object): \"\"\"Superclasse", "Console Classe base para os terminais de cliente e servidor.", "os.path.getsize(filename) self.send(str(size)) sent = 0 file = open(filename, 'rb') while", "ser decifrado Returns: (bytes): trecho de bytes decifrados \"\"\" msg", "chave pública para criptografia. 
\"\"\" k = self.sock.recv(1024) key =", "RSA e base64 antes do envio.\" Args: msg (str ou", "de bytes a serem recebidos Returns: (str) mensagem decifrada \"\"\"", "de sockets Esse método controla o envio sequencial de segmentos", "trecho criptografado Args: msg (bytes): trecho de mensagem a ser", "size = os.path.getsize(filename) self.send(str(size)) sent = 0 file = open(filename,", "sockets Esse método controla o envio sequencial de segmentos de", "arquivos através de sockets Esse método controla o envio sequencial", "msg): \"\"\"Criptografia de uma string ou trecho de bytes Args:", "msg def decrypt(self, msg): \"\"\"Método de conversão de um trecho", "sockets Esse método controla o recebeimendo de sementos de arquivos", "self.sock = kwargs.get('sock', socket.socket(socket.AF_INET, socket.SOCK_STREAM)) key_file = kwargs.get('key_file', '') if", "base64.a85decode(msg) msg = self.privatekey.decrypt(msg) return msg def send_file(self, filename): \"\"\"Rotina", "self.decrypt(self.sock.recv(b)) return msg.decode('utf-8') def encrypt(self, msg): \"\"\"Criptografia de uma string", "def run(self): \"\"\"Método run difere entre o Console do Host", "def encrypt(self, msg): \"\"\"Criptografia de uma string ou trecho de", "Returns: (str) mensagem decifrada \"\"\" msg = self.decrypt(self.sock.recv(b)) return msg.decode('utf-8')", "encrypt(self, msg): \"\"\"Criptografia de uma string ou trecho de bytes", "msg def send_file(self, filename): \"\"\"Rotina de envio de arquivos através", "return msg def decrypt(self, msg): \"\"\"Método de conversão de um", "bytes recebidos.\") Args: filename(str): nome do arquivo Yields: (int) quantidade", "antes do envio.\" Args: msg (str ou bytes): mensagem a", "definido dentro do método run. \"\"\" raise NotImplemented @staticmethod def", "return msg def send_file(self, filename): \"\"\"Rotina de envio de arquivos", "através de um socket, gerando a cada envio um número", "run. 
\"\"\" raise NotImplemented @staticmethod def start_key(key_file): \"\"\"Método de inicialização", "Kwargs: sock (socket): socket de comunicação key_file (str): arquivo para", "bytes criptografados \"\"\" if isinstance(msg, str): msg = msg.encode('utf-8') msg", "servidor e cliente trocam suas chaves públicas um com o", "arquivo Yields: (int) quantidade de bytes enviados ou -1, em", "key_file (str): arquivo para inicialização de par de chaves \"\"\"", "\"\"\"Rotina de envio de arquivos através de sockets Esse método", "um objeto do tipo RSA público a partir da chave", "run controla o comportamento do objeto como um todo. Todo", "comportamento de um console individual deve ser definido dentro do", "conversão de um trecho criptografado Args: msg (bytes): trecho de", "= file.read(1024) self.sock.send(nxt) sent += len(nxt) yield sent file.close() def", "de bytes Args: msg (str ou bytes): string ou bytes", "bytes recebidos \"\"\" size = int(self.receive()) file = open(filename, 'wb')", "através do socket É através desse método que o usuário", "(chave privada) e byte (inicializador da chave pública) \"\"\" try:", "Dentro desse método ocorrem as criptografias RSA e base64 antes", "start_key(key_file): \"\"\"Método de inicialização das chaves Esse método inicializa a", "len(nxt) file.write(nxt) yield rcvd file.close() def __repr__(self): return \"{0}({1}, {2},", "RSA.generate(1024) else: private_key = RSA.importKey(keyfile.read()) keyfile.close() finally: public_key = private_key.publickey().exportKey()", "o comportamento de um console individual deve ser definido dentro", "Returns: (tuple) uma tupla contendo um par _RSAobj (chave privada)", "if key_file: self.privatekey, self.publickey = Console.start_key(key_file) def run(self): \"\"\"Método run", "\"de \" str(file_size) \"bytes enviados\") Args: filename (str): endereço do", "run(self): \"\"\"Método run difere entre o Console do Host e", "ack = self.receive() nxt = file.read(1024) self.sock.send(nxt) sent += len(nxt)", 
"Yields: (int) quantidade de bytes recebidos \"\"\" size = int(self.receive())", "file.close() def __repr__(self): return \"{0}({1}, {2}, key_file = {3})\".format(self.__class__.__name__, self.sock.__repr__(),", "msg (bytes): trecho de mensagem a ser decifrado Returns: (bytes):", "file = open(filename, 'rb') while sent < size: ack =", "\"\"\"Superclasse Console Classe base para os terminais de cliente e", "de envio de arquivos através de sockets Esse método controla", "socket import os import base64 class Console(object): \"\"\"Superclasse Console Classe", "(int): quantidade de bytes a serem recebidos Returns: (str) mensagem", "key_file: self.privatekey, self.publickey = Console.start_key(key_file) def run(self): \"\"\"Método run difere", "else: print(str(b) + \"de \" str(file_size) \"bytes enviados\") Args: filename", "envio.\" Args: msg (str ou bytes): mensagem a ser enviada", "método inicializa a chave privada e prepara, também, a chave", "= open(filename, 'wb') rcvd = 0 while rcvd < size:", "@staticmethod def start_key(key_file): \"\"\"Método de inicialização das chaves Esse método", "rcvd += len(nxt) file.write(nxt) yield rcvd file.close() def __repr__(self): return", "de um arquivo através de um socket, gerando a cada", "com o outro. Esse método retorna um objeto do tipo", "ser definido dentro do método run. \"\"\" raise NotImplemented @staticmethod", "chaves no início da comunicação Ao se conectarem, servidor e", "de arquivos através de sockets Esse método controla o recebeimendo", "O Método run controla o comportamento do objeto como um", "\"\"\" def __init__(self, **kwargs): \"\"\"Método construtor do console Kwargs: sock", "privada) e byte (inicializador da chave pública) \"\"\" try: keyfile", "públicas um com o outro. Esse método retorna um objeto", "o comportamento do objeto como um todo. 
Todo o comportamento", "Ao se conectarem, servidor e cliente trocam suas chaves públicas", "bytes recebidos a cada nova mensagem recebida do socket, por", "size: self.send('ack') nxt = self.sock.recv(1024) rcvd += len(nxt) file.write(nxt) yield", "e cliente trocam suas chaves públicas um com o outro.", "Yields: (int) quantidade de bytes enviados ou -1, em caso", "\"\"\" from Crypto.PublicKey import RSA import socket import os import", "file.read(1024) self.sock.send(nxt) sent += len(nxt) yield sent file.close() def receive_file(self,", "em caso de erro \"\"\" size = os.path.getsize(filename) self.send(str(size)) sent", "RSA público a partir da chave pública recebida através de", "byte (inicializador da chave pública) \"\"\" try: keyfile = open(key_file,", "de comunicação key_file (str): arquivo para inicialização de par de", "RSA.importKey(keyfile.read()) keyfile.close() finally: public_key = private_key.publickey().exportKey() return private_key, public_key def", "de conversão de um trecho criptografado Args: msg (bytes): trecho", "\"\"\" size = int(self.receive()) file = open(filename, 'wb') rcvd =", "erro na transferência\") else: print(str(b) + \"de \" str(file_size) \"bytes", "do socket O Método send é o método usado apara", "recebidos \"\"\" size = int(self.receive()) file = open(filename, 'wb') rcvd", "exemplo abaixo. Example: for b in self.sendfile('alice.txt'): if b ==", "da chave privada Returns: (tuple) uma tupla contendo um par", "isinstance(msg, str): msg = msg.encode('utf-8') msg = self.publickey.encrypt(msg, 3.14159265359) msg", "a descriptografia acontece dentro do método receive. Args: b (int):", "quantidade de bytes enviados até o momento. Método deve ser", "enviados ou -1, em caso de erro \"\"\" size =", "usado como um gerador. 
Example: for b in receive_file(filename): print(str(b)", "decifrada \"\"\" msg = self.decrypt(self.sock.recv(b)) return msg.decode('utf-8') def encrypt(self, msg):", "self.send(str(size)) sent = 0 file = open(filename, 'rb') while sent", "bytes Args: msg (str ou bytes): string ou bytes a", "console Kwargs: sock (socket): socket de comunicação key_file (str): arquivo", "Returns: (_RSAobj) chave pública para criptografia. \"\"\" k = self.sock.recv(1024)", "recebimento de arquivos através de sockets Esse método controla o", "bytes): mensagem a ser enviada \"\"\" msg = self.encrypt(msg) self.sock.send(msg)", "um erro na transferência\") else: print(str(b) + \"de \" str(file_size)", "= base64.a85encode(msg[0]) return msg def decrypt(self, msg): \"\"\"Método de conversão", "self.sock.recv(1024) key = RSA.importKey(k) return key def send(self, msg): \"\"\"Método", "k = self.sock.recv(1024) key = RSA.importKey(k) return key def send(self,", "caso o usuário tenha realizado o login com sucesso, False", "criptografado Args: msg (bytes): trecho de mensagem a ser decifrado", "nova mensagem recebida do socket, por tanto, deve ser usado", "ser usado como um gerador. 
Example: for b in receive_file(filename):", "se conectarem, servidor e cliente trocam suas chaves públicas um", "kwargs.get('sock', socket.socket(socket.AF_INET, socket.SOCK_STREAM)) key_file = kwargs.get('key_file', '') if key_file: self.privatekey,", "privada Returns: (tuple) uma tupla contendo um par _RSAobj (chave", "a ser enviada \"\"\" msg = self.encrypt(msg) self.sock.send(msg) def receive(self,", "for b in self.sendfile('alice.txt'): if b == -1: print(\"Houve um", "chaves Esse método inicializa a chave privada e prepara, também,", "tenha realizado o login com sucesso, False caso contrário \"\"\"", "b in self.sendfile('alice.txt'): if b == -1: print(\"Houve um erro", "de bytes decifrados \"\"\" msg = base64.a85decode(msg) msg = self.privatekey.decrypt(msg)", "erro \"\"\" size = os.path.getsize(filename) self.send(str(size)) sent = 0 file", "Args: msg (str ou bytes): string ou bytes a serem", "class Console(object): \"\"\"Superclasse Console Classe base para os terminais de", "através desse método que o usuário recebe mensagens simples através", "contrário \"\"\" def __init__(self, **kwargs): \"\"\"Método construtor do console Kwargs:", "todo. Todo o comportamento de um console individual deve ser", "msg = self.publickey.encrypt(msg, 3.14159265359) msg = base64.a85encode(msg[0]) return msg def", "suas chaves públicas um com o outro. Esse método retorna", "deve ser usado como um gerador. Example: for b in", "3.14159265359) msg = base64.a85encode(msg[0]) return msg def decrypt(self, msg): \"\"\"Método", "socket. Returns: (_RSAobj) chave pública para criptografia. 
\"\"\" k =", "arquivo Yields: (int) quantidade de bytes recebidos \"\"\" size =", "str(filesize) \" bytes recebidos.\") Args: filename(str): nome do arquivo Yields:", "um par _RSAobj (chave privada) e byte (inicializador da chave", "finally: public_key = private_key.publickey().exportKey() return private_key, public_key def receive_key(self): \"\"\"Troca", "\"\"\" try: keyfile = open(key_file, 'rb') except FileNotFoundError: private_key =", "int(self.receive()) file = open(filename, 'wb') rcvd = 0 while rcvd", "deve ser usado como um gerador. Veja exemplo abaixo. Example:", "base64.a85encode(msg[0]) return msg def decrypt(self, msg): \"\"\"Método de conversão de", "+ \"de \" str(file_size) \"bytes enviados\") Args: filename (str): endereço", "= private_key.publickey().exportKey() return private_key, public_key def receive_key(self): \"\"\"Troca de chaves", "msg = self.encrypt(msg) self.sock.send(msg) def receive(self, b = 160): \"\"\"Método", "(bytes): trecho de mensagem a ser decifrado Returns: (bytes): trecho", "um todo. Todo o comportamento de um console individual deve", "\"\"\"Troca de chaves no início da comunicação Ao se conectarem,", "pública recebida através de um socket. Returns: (_RSAobj) chave pública", "objeto como um todo. Todo o comportamento de um console", "key def send(self, msg): \"\"\"Método send envia strings simples através", "sent < size: ack = self.receive() nxt = file.read(1024) self.sock.send(nxt)", "ser usado como um gerador. Veja exemplo abaixo. Example: for", "que o usuário recebe mensagens simples através do socket. As", "o envio sequencial de segmentos de um arquivo através de", "retorna um objeto do tipo RSA público a partir da", "console individual deve ser definido dentro do método run. 
\"\"\"", "do arquivo Yields: (int) quantidade de bytes recebidos \"\"\" size", "import base64 class Console(object): \"\"\"Superclasse Console Classe base para os", "key_file = kwargs.get('key_file', '') if key_file: self.privatekey, self.publickey = Console.start_key(key_file)", "\"\"\" if isinstance(msg, str): msg = msg.encode('utf-8') msg = self.publickey.encrypt(msg,", "segmentos de um arquivo através de um socket, gerando a", "terminais de cliente e servidor. Attributes: logged (bool): True caso", "rcvd = 0 while rcvd < size: self.send('ack') nxt =", "recebidos.\") Args: filename(str): nome do arquivo Yields: (int) quantidade de", "recebe mensagens simples através do socket É através desse método", "Método send é o método usado apara enviar mensagens simples", "Esse método retorna um objeto do tipo RSA público a", "e a descriptografia acontece dentro do método receive. Args: b", "import socket import os import base64 class Console(object): \"\"\"Superclasse Console", "entre o Console do Host e o do Client O", "do método receive. Args: b (int): quantidade de bytes a", "criptografados \"\"\" if isinstance(msg, str): msg = msg.encode('utf-8') msg =", "número inteiro referente a quantidade de bytes enviados até o", "= RSA.importKey(k) return key def send(self, msg): \"\"\"Método send envia", "nxt = file.read(1024) self.sock.send(nxt) sent += len(nxt) yield sent file.close()", "\"\"\" msg = base64.a85decode(msg) msg = self.privatekey.decrypt(msg) return msg def", "def __repr__(self): return \"{0}({1}, {2}, key_file = {3})\".format(self.__class__.__name__, self.sock.__repr__(), self.client.__repr__(),", "msg (str ou bytes): mensagem a ser enviada \"\"\" msg", "recebidos a cada nova mensagem recebida do socket, por tanto,", "Console.start_key(key_file) def run(self): \"\"\"Método run difere entre o Console do", "comunicação Ao se conectarem, servidor e cliente trocam suas chaves", "recebida através de um socket. 
Returns: (_RSAobj) chave pública para", "é o método usado apara enviar mensagens simples através de", "socket. As mensagens chegam criptografadas e a descriptografia acontece dentro", "cada nova mensagem recebida do socket, por tanto, deve ser", "cliente trocam suas chaves públicas um com o outro. Esse", "sucesso, False caso contrário \"\"\" def __init__(self, **kwargs): \"\"\"Método construtor", "= RSA.importKey(keyfile.read()) keyfile.close() finally: public_key = private_key.publickey().exportKey() return private_key, public_key", "print(str(b) + \" de \" str(filesize) \" bytes recebidos.\") Args:", "partir da chave pública recebida através de um socket. Returns:", "o login com sucesso, False caso contrário \"\"\" def __init__(self,", "o outro. Esse método retorna um objeto do tipo RSA", "string ou trecho de bytes Args: msg (str ou bytes):", "método retorna um objeto do tipo RSA público a partir", "um arquivo através de um socket, gerando a cada envio", "mensagens chegam criptografadas e a descriptografia acontece dentro do método", "= self.privatekey.decrypt(msg) return msg def send_file(self, filename): \"\"\"Rotina de envio", "enviar mensagens simples através de um socket. Dentro desse método", "endereço do arquivo Yields: (int) quantidade de bytes enviados ou", "até o momento. Método deve ser usado como um gerador.", "conectarem, servidor e cliente trocam suas chaves públicas um com", "simples através de um socket. Dentro desse método ocorrem as", "= self.publickey.encrypt(msg, 3.14159265359) msg = base64.a85encode(msg[0]) return msg def decrypt(self,", "cliente e servidor. Attributes: logged (bool): True caso o usuário", "\"\"\"Método de conversão de um trecho criptografado Args: msg (bytes):", "envio. Args: key_file (str): endereço do arquivo da chave privada", "Método run controla o comportamento do objeto como um todo.", "objeto do tipo RSA público a partir da chave pública", "de arquivos através de um socket. 
O método gera a", "pública para criptografia. \"\"\" k = self.sock.recv(1024) key = RSA.importKey(k)", "de \" str(filesize) \" bytes recebidos.\") Args: filename(str): nome do", "do Client O Método run controla o comportamento do objeto", "simples através do socket O Método send é o método", "msg): \"\"\"Método de conversão de um trecho criptografado Args: msg", "método gera a quantidade de bytes recebidos a cada nova", "msg.decode('utf-8') def encrypt(self, msg): \"\"\"Criptografia de uma string ou trecho", "b == -1: print(\"Houve um erro na transferência\") else: print(str(b)", "\"\"\" msg = self.encrypt(msg) self.sock.send(msg) def receive(self, b = 160):", "open(filename, 'rb') while sent < size: ack = self.receive() nxt", "e base64 antes do envio.\" Args: msg (str ou bytes):", "através de sockets Esse método controla o envio sequencial de", "mensagens simples através do socket É através desse método que", "= kwargs.get('key_file', '') if key_file: self.privatekey, self.publickey = Console.start_key(key_file) def", "trocam suas chaves públicas um com o outro. Esse método", "da chave pública recebida através de um socket. Returns: (_RSAobj)", "Host e o do Client O Método run controla o", "(str) mensagem decifrada \"\"\" msg = self.decrypt(self.sock.recv(b)) return msg.decode('utf-8') def", "método usado apara enviar mensagens simples através de um socket.", "+= len(nxt) yield sent file.close() def receive_file(self, filename): \"\"\"Rotina de", "receive(self, b = 160): \"\"\"Método receive recebe mensagens simples através", "os import base64 class Console(object): \"\"\"Superclasse Console Classe base para", "def send_file(self, filename): \"\"\"Rotina de envio de arquivos através de", "para inicialização de par de chaves \"\"\" self.sock = kwargs.get('sock',", "= int(self.receive()) file = open(filename, 'wb') rcvd = 0 while", "do objeto como um todo. 
Todo o comportamento de um", "simples através do socket É através desse método que o", "Args: filename (str): endereço do arquivo Yields: (int) quantidade de", "do arquivo Yields: (int) quantidade de bytes enviados ou -1,", "-1: print(\"Houve um erro na transferência\") else: print(str(b) + \"de", "import os import base64 class Console(object): \"\"\"Superclasse Console Classe base", "de arquivos através de sockets Esse método controla o envio", "< size: self.send('ack') nxt = self.sock.recv(1024) rcvd += len(nxt) file.write(nxt)", "de cliente e servidor. Attributes: logged (bool): True caso o", "método ocorrem as criptografias RSA e base64 antes do envio.\"", "keyfile.close() finally: public_key = private_key.publickey().exportKey() return private_key, public_key def receive_key(self):", "RSA.importKey(k) return key def send(self, msg): \"\"\"Método send envia strings", "= RSA.generate(1024) else: private_key = RSA.importKey(keyfile.read()) keyfile.close() finally: public_key =", "self.send('ack') nxt = self.sock.recv(1024) rcvd += len(nxt) file.write(nxt) yield rcvd", "sementos de arquivos através de um socket. O método gera", "e prepara, também, a chave pública para envio. Args: key_file", "= self.receive() nxt = file.read(1024) self.sock.send(nxt) sent += len(nxt) yield", "tupla contendo um par _RSAobj (chave privada) e byte (inicializador", "a serem criptografados. Returns: (bytes) segmento de bytes criptografados \"\"\"", "acontece dentro do método receive. Args: b (int): quantidade de", "= 0 while rcvd < size: self.send('ack') nxt = self.sock.recv(1024)", "inicialização das chaves Esse método inicializa a chave privada e", "usado apara enviar mensagens simples através de um socket. 
Dentro", "Args: msg (str ou bytes): mensagem a ser enviada \"\"\"", "serem recebidos Returns: (str) mensagem decifrada \"\"\" msg = self.decrypt(self.sock.recv(b))", "controla o recebeimendo de sementos de arquivos através de um", "gera a quantidade de bytes recebidos a cada nova mensagem", "\"\"\"Método send envia strings simples através do socket O Método", "def receive_file(self, filename): \"\"\"Rotina de recebimento de arquivos através de", "quantidade de bytes enviados ou -1, em caso de erro", "a cada envio um número inteiro referente a quantidade de", "\"\"\"Criptografia de uma string ou trecho de bytes Args: msg", "Esse método controla o recebeimendo de sementos de arquivos através", "strings simples através do socket O Método send é o", "__repr__(self): return \"{0}({1}, {2}, key_file = {3})\".format(self.__class__.__name__, self.sock.__repr__(), self.client.__repr__(), repr(self.key_file))", "return key def send(self, msg): \"\"\"Método send envia strings simples", "de bytes recebidos a cada nova mensagem recebida do socket,", "transferência\") else: print(str(b) + \"de \" str(file_size) \"bytes enviados\") Args:", "de sockets Esse método controla o recebeimendo de sementos de", "na transferência\") else: print(str(b) + \"de \" str(file_size) \"bytes enviados\")", "size = int(self.receive()) file = open(filename, 'wb') rcvd = 0", "os terminais de cliente e servidor. Attributes: logged (bool): True", "caso de erro \"\"\" size = os.path.getsize(filename) self.send(str(size)) sent =", "= self.sock.recv(1024) key = RSA.importKey(k) return key def send(self, msg):", "import RSA import socket import os import base64 class Console(object):", "return private_key, public_key def receive_key(self): \"\"\"Troca de chaves no início", "um socket. Returns: (_RSAobj) chave pública para criptografia. 
\"\"\" k", "b in receive_file(filename): print(str(b) + \" de \" str(filesize) \"", "\"\"\" self.sock = kwargs.get('sock', socket.socket(socket.AF_INET, socket.SOCK_STREAM)) key_file = kwargs.get('key_file', '')", "com sucesso, False caso contrário \"\"\" def __init__(self, **kwargs): \"\"\"Método", "\"\"\"Método de inicialização das chaves Esse método inicializa a chave", "a ser decifrado Returns: (bytes): trecho de bytes decifrados \"\"\"", "a quantidade de bytes enviados até o momento. Método deve", "socket.SOCK_STREAM)) key_file = kwargs.get('key_file', '') if key_file: self.privatekey, self.publickey =", "sent = 0 file = open(filename, 'rb') while sent <", "de bytes recebidos \"\"\" size = int(self.receive()) file = open(filename,", "nxt = self.sock.recv(1024) rcvd += len(nxt) file.write(nxt) yield rcvd file.close()", "caso contrário \"\"\" def __init__(self, **kwargs): \"\"\"Método construtor do console", "def __init__(self, **kwargs): \"\"\"Método construtor do console Kwargs: sock (socket):", "quantidade de bytes recebidos \"\"\" size = int(self.receive()) file =", "inicializa a chave privada e prepara, também, a chave pública", "size: ack = self.receive() nxt = file.read(1024) self.sock.send(nxt) sent +=", "base para os terminais de cliente e servidor. Attributes: logged", "cada envio um número inteiro referente a quantidade de bytes", "controla o envio sequencial de segmentos de um arquivo através", "chave pública recebida através de um socket. Returns: (_RSAobj) chave", "def receive(self, b = 160): \"\"\"Método receive recebe mensagens simples", "Console do Host e o do Client O Método run", "o método usado apara enviar mensagens simples através de um", "descriptografia acontece dentro do método receive. Args: b (int): quantidade", "Veja exemplo abaixo. Example: for b in self.sendfile('alice.txt'): if b", "um gerador. Veja exemplo abaixo. 
Example: for b in self.sendfile('alice.txt'):", "print(\"Houve um erro na transferência\") else: print(str(b) + \"de \"", "criptografia. \"\"\" k = self.sock.recv(1024) key = RSA.importKey(k) return key", "\" bytes recebidos.\") Args: filename(str): nome do arquivo Yields: (int)", "como um todo. Todo o comportamento de um console individual", "através do socket O Método send é o método usado", "open(key_file, 'rb') except FileNotFoundError: private_key = RSA.generate(1024) else: private_key =", "de um socket. Dentro desse método ocorrem as criptografias RSA", "receive recebe mensagens simples através do socket É através desse", "um com o outro. Esse método retorna um objeto do", "private_key.publickey().exportKey() return private_key, public_key def receive_key(self): \"\"\"Troca de chaves no", "as criptografias RSA e base64 antes do envio.\" Args: msg", "de um console individual deve ser definido dentro do método", "através de um socket. Dentro desse método ocorrem as criptografias", "privada e prepara, também, a chave pública para envio. Args:", "\"\"\"Rotina de recebimento de arquivos através de sockets Esse método", "Args: filename(str): nome do arquivo Yields: (int) quantidade de bytes", "ou bytes): string ou bytes a serem criptografados. Returns: (bytes)", "Console(object): \"\"\"Superclasse Console Classe base para os terminais de cliente", "filename (str): endereço do arquivo Yields: (int) quantidade de bytes", "tipo RSA público a partir da chave pública recebida através", "run difere entre o Console do Host e o do", "inteiro referente a quantidade de bytes enviados até o momento.", "através de um socket. O método gera a quantidade de", "0 while rcvd < size: self.send('ack') nxt = self.sock.recv(1024) rcvd", "yield sent file.close() def receive_file(self, filename): \"\"\"Rotina de recebimento de", "referente a quantidade de bytes enviados até o momento. 
Método", "ocorrem as criptografias RSA e base64 antes do envio.\" Args:", "o usuário recebe mensagens simples através do socket. As mensagens", "ou -1, em caso de erro \"\"\" size = os.path.getsize(filename)", "sent += len(nxt) yield sent file.close() def receive_file(self, filename): \"\"\"Rotina", "socket O Método send é o método usado apara enviar", "160): \"\"\"Método receive recebe mensagens simples através do socket É", "As mensagens chegam criptografadas e a descriptografia acontece dentro do", "recebidos Returns: (str) mensagem decifrada \"\"\" msg = self.decrypt(self.sock.recv(b)) return", "o momento. Método deve ser usado como um gerador. Veja", "bytes): string ou bytes a serem criptografados. Returns: (bytes) segmento", "True caso o usuário tenha realizado o login com sucesso,", "individual deve ser definido dentro do método run. \"\"\" raise", "do método run. \"\"\" raise NotImplemented @staticmethod def start_key(key_file): \"\"\"Método", "um socket. O método gera a quantidade de bytes recebidos", "base64 antes do envio.\" Args: msg (str ou bytes): mensagem", "(bytes) segmento de bytes criptografados \"\"\" if isinstance(msg, str): msg", "== -1: print(\"Houve um erro na transferência\") else: print(str(b) +", "recebeimendo de sementos de arquivos através de um socket. O", "do console Kwargs: sock (socket): socket de comunicação key_file (str):", "envia strings simples através do socket O Método send é", "try: keyfile = open(key_file, 'rb') except FileNotFoundError: private_key = RSA.generate(1024)", "yield rcvd file.close() def __repr__(self): return \"{0}({1}, {2}, key_file =", "Crypto.PublicKey import RSA import socket import os import base64 class", "por tanto, deve ser usado como um gerador. 
Example: for", "de inicialização das chaves Esse método inicializa a chave privada", "send_file(self, filename): \"\"\"Rotina de envio de arquivos através de sockets", "= Console.start_key(key_file) def run(self): \"\"\"Método run difere entre o Console", "self.publickey.encrypt(msg, 3.14159265359) msg = base64.a85encode(msg[0]) return msg def decrypt(self, msg):", "socket É através desse método que o usuário recebe mensagens", "Args: key_file (str): endereço do arquivo da chave privada Returns:", "file = open(filename, 'wb') rcvd = 0 while rcvd <", "para envio. Args: key_file (str): endereço do arquivo da chave", "outro. Esse método retorna um objeto do tipo RSA público", "Esse método inicializa a chave privada e prepara, também, a", "= self.decrypt(self.sock.recv(b)) return msg.decode('utf-8') def encrypt(self, msg): \"\"\"Criptografia de uma", "send(self, msg): \"\"\"Método send envia strings simples através do socket", "sequencial de segmentos de um arquivo através de um socket,", "tanto, deve ser usado como um gerador. Example: for b", "= 0 file = open(filename, 'rb') while sent < size:", "do socket É através desse método que o usuário recebe", "logged (bool): True caso o usuário tenha realizado o login", "msg): \"\"\"Método send envia strings simples através do socket O", "-*- \"\"\"Módulo de configuração dos consoles \"\"\" from Crypto.PublicKey import", "desse método que o usuário recebe mensagens simples através do", "de um socket. O método gera a quantidade de bytes", "mensagem a ser enviada \"\"\" msg = self.encrypt(msg) self.sock.send(msg) def", "da comunicação Ao se conectarem, servidor e cliente trocam suas", "\"\"\"Método run difere entre o Console do Host e o", "comportamento do objeto como um todo. Todo o comportamento de", "ou bytes a serem criptografados. 
Returns: (bytes) segmento de bytes", "o Console do Host e o do Client O Método", "O método gera a quantidade de bytes recebidos a cada", "do socket, por tanto, deve ser usado como um gerador.", "keyfile = open(key_file, 'rb') except FileNotFoundError: private_key = RSA.generate(1024) else:", "de um socket. Returns: (_RSAobj) chave pública para criptografia. \"\"\"", "chaves públicas um com o outro. Esse método retorna um", "construtor do console Kwargs: sock (socket): socket de comunicação key_file", "receive. Args: b (int): quantidade de bytes a serem recebidos", "in receive_file(filename): print(str(b) + \" de \" str(filesize) \" bytes", "comunicação key_file (str): arquivo para inicialização de par de chaves", "mensagem recebida do socket, por tanto, deve ser usado como", "um trecho criptografado Args: msg (bytes): trecho de mensagem a", "self.sendfile('alice.txt'): if b == -1: print(\"Houve um erro na transferência\")", "enviados\") Args: filename (str): endereço do arquivo Yields: (int) quantidade", "self.receive() nxt = file.read(1024) self.sock.send(nxt) sent += len(nxt) yield sent", "filename(str): nome do arquivo Yields: (int) quantidade de bytes recebidos", "Esse método controla o envio sequencial de segmentos de um", "public_key = private_key.publickey().exportKey() return private_key, public_key def receive_key(self): \"\"\"Troca de", "\"\"\" size = os.path.getsize(filename) self.send(str(size)) sent = 0 file =", "de um trecho criptografado Args: msg (bytes): trecho de mensagem", "= open(key_file, 'rb') except FileNotFoundError: private_key = RSA.generate(1024) else: private_key", "trecho de mensagem a ser decifrado Returns: (bytes): trecho de", "filename): \"\"\"Rotina de envio de arquivos através de sockets Esse", "difere entre o Console do Host e o do Client", "chave pública para envio. Args: key_file (str): endereço do arquivo", "(tuple) uma tupla contendo um par _RSAobj (chave privada) e" ]
[ "SHOULD be defined: * DJANGO_SECRET_KEY You may also want to", "= (\"HTTP_X_FORWARDED_PROTO\", \"https\") AUTH_PASSWORD_VALIDATORS = [ { \"NAME\": ( \"django.contrib.auth.password_validation.\"", "X-Forwarded-Proto header from all incoming requests # - Your proxy", "proxy strips the X-Forwarded-Proto header from all incoming requests #", "configuration mixins in ./configurations and compose the Base configuration with", "\"django.contrib.auth.password_validation.CommonPasswordValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"}, ] # Application ROOT_URLCONF = \"urls\" WSGI_APPLICATION", "ROOT_URLCONF = \"urls\" WSGI_APPLICATION = \"wsgi.application\" # Database DATABASES =", "every configuration (aka environnement) should inherit from. It is recommended", "Path from tempfile import mkdtemp from configurations import Configuration, values", "you should comment the following line to avoid security issues.", "environ_prefix=None ), \"HOST\": values.Value( \"localhost\", environ_name=\"DB_HOST\", environ_prefix=None ), \"PORT\": values.Value(5432,", "mixins in ./configurations and compose the Base configuration with those", "You may also want to override default configuration by setting", "Django # # In other cases, you should comment the", "( \"django.contrib.auth.password_validation.\" \"UserAttributeSimilarityValidator\" ), }, {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"}, {\"NAME\":", "Security ALLOWED_HOSTS = [] SECRET_KEY = values.Value(None) # SECURE_PROXY_SSL_HEADER allows", "comment the following line to avoid security issues. SECURE_PROXY_SSL_HEADER =", "the scheme in Django's HttpRequest # object when you application", "reverse proxy. 
# # Keep this SECURE_PROXY_SSL_HEADER configuration only if", "incoming requests # - Your proxy sets the X-Forwarded-Proto header", "to fix the scheme in Django's HttpRequest # object when", "following environment variables: * DB_NAME * DB_HOST * DB_PASSWORD *", "Development environment settings We set DEBUG to True and configure", "definition INSTALLED_APPS = [ \"django.contrib.admin\", \"django.contrib.auth\", \"django.contrib.contenttypes\", \"django.contrib.sessions\", \"django.contrib.messages\", \"django.contrib.staticfiles\",", "your proxy strips the X-Forwarded-Proto header from all incoming requests", "}, }, ] class Development(Base): \"\"\" Development environment settings We", "environ_prefix=None), \"PASSWORD\": values.Value( \"pass\", environ_name=\"DB_PASSWORD\", environ_prefix=None ), \"HOST\": values.Value( \"localhost\",", "[ \"howard\", ] MARION_DOCUMENT_ISSUER_CHOICES_CLASS = \"howard.defaults.DocumentIssuerChoices\" class Test(Base): \"\"\"Test environment", "Application definition INSTALLED_APPS = [ \"django.contrib.admin\", \"django.contrib.auth\", \"django.contrib.contenttypes\", \"django.contrib.sessions\", \"django.contrib.messages\",", "DATA_DIR.joinpath(\"static\") MEDIA_URL = \"/media/\" MEDIA_ROOT = DATA_DIR.joinpath(\"media\") # Internationalization LANGUAGE_CODE", "MARION_DOCUMENT_ISSUER_CHOICES_CLASS = \"howard.defaults.DocumentIssuerChoices\" class Test(Base): \"\"\"Test environment settings\"\"\" MEDIA_ROOT =", "ROOT_URLCONF = \"urls.debug\" # Application definition INSTALLED_APPS = Base.INSTALLED_APPS +", "This is the base configuration every configuration (aka environnement) should", "the X-Forwarded-Proto header and sends it to Django # #", "to True and configure the server to respond from all", "recommended to configure third-party applications by creating a configuration mixins", "] MARION_DOCUMENT_ISSUER_CHOICES_CLASS = \"howard.defaults.DocumentIssuerChoices\" class Test(Base): \"\"\"Test environment settings\"\"\" 
MEDIA_ROOT", "compose the Base configuration with those mixins. It depends on", "= DATA_DIR.joinpath(\"static\") MEDIA_URL = \"/media/\" MEDIA_ROOT = DATA_DIR.joinpath(\"media\") # Internationalization", "= Path(\"/data\") # pylint: disable=no-init class Base(Configuration): \"\"\" This is", "\"ENGINE\": values.Value( \"django.db.backends.postgresql_psycopg2\", environ_name=\"DB_ENGINE\", environ_prefix=None, ), \"NAME\": values.Value(\"marion\", environ_name=\"DB_NAME\", environ_prefix=None),", "\"APP_DIRS\": True, \"OPTIONS\": { \"context_processors\": [ \"django.template.context_processors.debug\", \"django.template.context_processors.request\", \"django.contrib.auth.context_processors.auth\", \"django.contrib.messages.context_processors.messages\",", "values.Value(5432, environ_name=\"DB_PORT\", environ_prefix=None), } } # Static files (CSS, JavaScript,", "with those mixins. It depends on an environment variable that", "), \"NAME\": values.Value(\"marion\", environ_name=\"DB_NAME\", environ_prefix=None), \"USER\": values.Value(\"fun\", environ_name=\"DB_USER\", environ_prefix=None), \"PASSWORD\":", "configuration every configuration (aka environnement) should inherit from. It is", "\"context_processors\": [ \"django.template.context_processors.debug\", \"django.template.context_processors.request\", \"django.contrib.auth.context_processors.auth\", \"django.contrib.messages.context_processors.messages\", ], }, }, ]", "the Base configuration with those mixins. It depends on an", "- your proxy strips the X-Forwarded-Proto header from all incoming", "from all hosts. \"\"\" DEBUG = True ALLOWED_HOSTS = [\"*\"]", "line to avoid security issues. 
SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\") AUTH_PASSWORD_VALIDATORS", "fix the scheme in Django's HttpRequest # object when you", "\"howard\", ] MARION_DOCUMENT_ISSUER_CHOICES_CLASS = \"howard.defaults.DocumentIssuerChoices\" class Test(Base): \"\"\"Test environment settings\"\"\"", "X-Forwarded-Proto header and sends it to Django # # In", "applications by creating a configuration mixins in ./configurations and compose", "from tempfile import mkdtemp from configurations import Configuration, values BASE_DIR", "\"howard.defaults.DocumentIssuerChoices\" class Test(Base): \"\"\"Test environment settings\"\"\" MEDIA_ROOT = Path(mkdtemp()) ROOT_URLCONF", "project. \"\"\" from pathlib import Path from tempfile import mkdtemp", "third-party applications by creating a configuration mixins in ./configurations and", "all hosts. \"\"\" DEBUG = True ALLOWED_HOSTS = [\"*\"] ROOT_URLCONF", "}, ] class Development(Base): \"\"\" Development environment settings We set", "values.Value( \"django.db.backends.postgresql_psycopg2\", environ_name=\"DB_ENGINE\", environ_prefix=None, ), \"NAME\": values.Value(\"marion\", environ_name=\"DB_NAME\", environ_prefix=None), \"USER\":", "\"NAME\": values.Value(\"marion\", environ_name=\"DB_NAME\", environ_prefix=None), \"USER\": values.Value(\"fun\", environ_name=\"DB_USER\", environ_prefix=None), \"PASSWORD\": values.Value(", "server to respond from all hosts. 
\"\"\" DEBUG = True", "\"\"\" DEBUG = True ALLOWED_HOSTS = [\"*\"] ROOT_URLCONF = \"urls.debug\"", "{ \"ENGINE\": values.Value( \"django.db.backends.postgresql_psycopg2\", environ_name=\"DB_ENGINE\", environ_prefix=None, ), \"NAME\": values.Value(\"marion\", environ_name=\"DB_NAME\",", "environment variable that SHOULD be defined: * DJANGO_SECRET_KEY You may", "Configuration, values BASE_DIR = Path(__file__).parent.resolve() DATA_DIR = Path(\"/data\") # pylint:", "# Application definition INSTALLED_APPS = Base.INSTALLED_APPS + [ \"howard\", ]", "./configurations and compose the Base configuration with those mixins. It", "# object when you application is behind a reverse proxy.", "DEBUG = True ALLOWED_HOSTS = [\"*\"] ROOT_URLCONF = \"urls.debug\" #", "Images) STATIC_URL = \"/static/\" STATIC_ROOT = DATA_DIR.joinpath(\"static\") MEDIA_URL = \"/media/\"", "environment variables: * DB_NAME * DB_HOST * DB_PASSWORD * DB_USER", "following line to avoid security issues. SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")", "USE_I18N = True USE_L10N = True USE_TZ = True #", "# pylint: disable=no-init class Base(Configuration): \"\"\" This is the base", "setting the following environment variables: * DB_NAME * DB_HOST *", "\"urls\" WSGI_APPLICATION = \"wsgi.application\" # Database DATABASES = { \"default\":", "] class Development(Base): \"\"\" Development environment settings We set DEBUG", "if : # - your Django app is behind a", "the base configuration every configuration (aka environnement) should inherit from.", "mkdtemp from configurations import Configuration, values BASE_DIR = Path(__file__).parent.resolve() DATA_DIR", "(aka environnement) should inherit from. 
It is recommended to configure", "values.Value(\"fun\", environ_name=\"DB_USER\", environ_prefix=None), \"PASSWORD\": values.Value( \"pass\", environ_name=\"DB_PASSWORD\", environ_prefix=None ), \"HOST\":", "\"localhost\", environ_name=\"DB_HOST\", environ_prefix=None ), \"PORT\": values.Value(5432, environ_name=\"DB_PORT\", environ_prefix=None), } }", "= \"wsgi.application\" # Database DATABASES = { \"default\": { \"ENGINE\":", "\"django.template.context_processors.request\", \"django.contrib.auth.context_processors.auth\", \"django.contrib.messages.context_processors.messages\", ], }, }, ] class Development(Base): \"\"\"", "\"pass\", environ_name=\"DB_PASSWORD\", environ_prefix=None ), \"HOST\": values.Value( \"localhost\", environ_name=\"DB_HOST\", environ_prefix=None ),", "import mkdtemp from configurations import Configuration, values BASE_DIR = Path(__file__).parent.resolve()", "in ./configurations and compose the Base configuration with those mixins.", "\"django.contrib.sessions\", \"django.contrib.messages\", \"django.contrib.staticfiles\", \"rest_framework\", \"marion\", ] MIDDLEWARE = [ \"django.middleware.security.SecurityMiddleware\",", "\"wsgi.application\" # Database DATABASES = { \"default\": { \"ENGINE\": values.Value(", "and compose the Base configuration with those mixins. 
It depends", "environ_prefix=None ), \"PORT\": values.Value(5432, environ_name=\"DB_PORT\", environ_prefix=None), } } # Static", "scheme in Django's HttpRequest # object when you application is", "{ \"BACKEND\": \"django.template.backends.django.DjangoTemplates\", \"DIRS\": [], \"APP_DIRS\": True, \"OPTIONS\": { \"context_processors\":", "Database DATABASES = { \"default\": { \"ENGINE\": values.Value( \"django.db.backends.postgresql_psycopg2\", environ_name=\"DB_ENGINE\",", "environ_name=\"DB_ENGINE\", environ_prefix=None, ), \"NAME\": values.Value(\"marion\", environ_name=\"DB_NAME\", environ_prefix=None), \"USER\": values.Value(\"fun\", environ_name=\"DB_USER\",", "configurations import Configuration, values BASE_DIR = Path(__file__).parent.resolve() DATA_DIR = Path(\"/data\")", "header and sends it to Django # # In other", "hosts. \"\"\" DEBUG = True ALLOWED_HOSTS = [\"*\"] ROOT_URLCONF =", "* DB_HOST * DB_PASSWORD * DB_USER \"\"\" DEBUG = False", ": # - your Django app is behind a proxy.", "[\"*\"] ROOT_URLCONF = \"urls.debug\" # Application definition INSTALLED_APPS = Base.INSTALLED_APPS", "SECRET_KEY = values.Value(None) # SECURE_PROXY_SSL_HEADER allows to fix the scheme", "from all incoming requests # - Your proxy sets the", "{\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"}, ] # Application ROOT_URLCONF = \"urls\" WSGI_APPLICATION =", "app is behind a proxy. # - your proxy strips", "values.Value(\"marion\", environ_name=\"DB_NAME\", environ_prefix=None), \"USER\": values.Value(\"fun\", environ_name=\"DB_USER\", environ_prefix=None), \"PASSWORD\": values.Value( \"pass\",", "the following line to avoid security issues. SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\",", "application is behind a reverse proxy. 
# # Keep this", "= \"/media/\" MEDIA_ROOT = DATA_DIR.joinpath(\"media\") # Internationalization LANGUAGE_CODE = \"en-us\"", "USE_L10N = True USE_TZ = True # Application definition INSTALLED_APPS", "\"django.template.context_processors.debug\", \"django.template.context_processors.request\", \"django.contrib.auth.context_processors.auth\", \"django.contrib.messages.context_processors.messages\", ], }, }, ] class Development(Base):", "SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\") AUTH_PASSWORD_VALIDATORS = [ { \"NAME\": (", "default configuration by setting the following environment variables: * DB_NAME", "# Static files (CSS, JavaScript, Images) STATIC_URL = \"/static/\" STATIC_ROOT", "Keep this SECURE_PROXY_SSL_HEADER configuration only if : # - your", "environ_prefix=None), \"USER\": values.Value(\"fun\", environ_name=\"DB_USER\", environ_prefix=None), \"PASSWORD\": values.Value( \"pass\", environ_name=\"DB_PASSWORD\", environ_prefix=None", "environ_prefix=None), } } # Static files (CSS, JavaScript, Images) STATIC_URL", "be defined: * DJANGO_SECRET_KEY You may also want to override", "\"django.contrib.admin\", \"django.contrib.auth\", \"django.contrib.contenttypes\", \"django.contrib.sessions\", \"django.contrib.messages\", \"django.contrib.staticfiles\", \"rest_framework\", \"marion\", ] MIDDLEWARE", "to configure third-party applications by creating a configuration mixins in", "Base configuration with those mixins. 
It depends on an environment", "\"django.middleware.clickjacking.XFrameOptionsMiddleware\", ] TEMPLATES = [ { \"BACKEND\": \"django.template.backends.django.DjangoTemplates\", \"DIRS\": [],", "environ_prefix=None, ), \"NAME\": values.Value(\"marion\", environ_name=\"DB_NAME\", environ_prefix=None), \"USER\": values.Value(\"fun\", environ_name=\"DB_USER\", environ_prefix=None),", "[ \"django.contrib.admin\", \"django.contrib.auth\", \"django.contrib.contenttypes\", \"django.contrib.sessions\", \"django.contrib.messages\", \"django.contrib.staticfiles\", \"rest_framework\", \"marion\", ]", "DB_HOST * DB_PASSWORD * DB_USER \"\"\" DEBUG = False #", "{\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"}, ] # Application ROOT_URLCONF", "True # Application definition INSTALLED_APPS = [ \"django.contrib.admin\", \"django.contrib.auth\", \"django.contrib.contenttypes\",", "False # Security ALLOWED_HOSTS = [] SECRET_KEY = values.Value(None) #", "Your proxy sets the X-Forwarded-Proto header and sends it to", "\"BACKEND\": \"django.template.backends.django.DjangoTemplates\", \"DIRS\": [], \"APP_DIRS\": True, \"OPTIONS\": { \"context_processors\": [", "{ \"context_processors\": [ \"django.template.context_processors.debug\", \"django.template.context_processors.request\", \"django.contrib.auth.context_processors.auth\", \"django.contrib.messages.context_processors.messages\", ], }, },", "all incoming requests # - Your proxy sets the X-Forwarded-Proto", "and sends it to Django # # In other cases,", "environ_name=\"DB_PASSWORD\", environ_prefix=None ), \"HOST\": values.Value( \"localhost\", environ_name=\"DB_HOST\", environ_prefix=None ), \"PORT\":", "\"DIRS\": [], \"APP_DIRS\": True, \"OPTIONS\": { \"context_processors\": [ \"django.template.context_processors.debug\", 
\"django.template.context_processors.request\",", "\"USER\": values.Value(\"fun\", environ_name=\"DB_USER\", environ_prefix=None), \"PASSWORD\": values.Value( \"pass\", environ_name=\"DB_PASSWORD\", environ_prefix=None ),", "from pathlib import Path from tempfile import mkdtemp from configurations", "pylint: disable=no-init class Base(Configuration): \"\"\" This is the base configuration", "\"marion\", ] MIDDLEWARE = [ \"django.middleware.security.SecurityMiddleware\", \"django.contrib.sessions.middleware.SessionMiddleware\", \"django.middleware.common.CommonMiddleware\", \"django.middleware.csrf.CsrfViewMiddleware\", \"django.contrib.auth.middleware.AuthenticationMiddleware\",", "\"django.contrib.auth.context_processors.auth\", \"django.contrib.messages.context_processors.messages\", ], }, }, ] class Development(Base): \"\"\" Development", "= \"urls.debug\" # Application definition INSTALLED_APPS = Base.INSTALLED_APPS + [", "creating a configuration mixins in ./configurations and compose the Base", "the following environment variables: * DB_NAME * DB_HOST * DB_PASSWORD", "pathlib import Path from tempfile import mkdtemp from configurations import", "files (CSS, JavaScript, Images) STATIC_URL = \"/static/\" STATIC_ROOT = DATA_DIR.joinpath(\"static\")", "a reverse proxy. 
# # Keep this SECURE_PROXY_SSL_HEADER configuration only", "Test(Base): \"\"\"Test environment settings\"\"\" MEDIA_ROOT = Path(mkdtemp()) ROOT_URLCONF = \"urls.debug\"", "\"\"\" Development environment settings We set DEBUG to True and", "DB_USER \"\"\" DEBUG = False # Security ALLOWED_HOSTS = []", "DB_PASSWORD * DB_USER \"\"\" DEBUG = False # Security ALLOWED_HOSTS", "proxy sets the X-Forwarded-Proto header and sends it to Django", "* DB_USER \"\"\" DEBUG = False # Security ALLOWED_HOSTS =", "True USE_L10N = True USE_TZ = True # Application definition", "{ \"NAME\": ( \"django.contrib.auth.password_validation.\" \"UserAttributeSimilarityValidator\" ), }, {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"}, {\"NAME\":", "= Base.INSTALLED_APPS + [ \"howard\", ] MARION_DOCUMENT_ISSUER_CHOICES_CLASS = \"howard.defaults.DocumentIssuerChoices\" class", "Django settings for marion project. \"\"\" from pathlib import Path", "= \"/static/\" STATIC_ROOT = DATA_DIR.joinpath(\"static\") MEDIA_URL = \"/media/\" MEDIA_ROOT =", "= Path(__file__).parent.resolve() DATA_DIR = Path(\"/data\") # pylint: disable=no-init class Base(Configuration):", "It depends on an environment variable that SHOULD be defined:", "\"UserAttributeSimilarityValidator\" ), }, {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"}, ]", "definition INSTALLED_APPS = Base.INSTALLED_APPS + [ \"howard\", ] MARION_DOCUMENT_ISSUER_CHOICES_CLASS =", "respond from all hosts. 
\"\"\" DEBUG = True ALLOWED_HOSTS =", "] MIDDLEWARE = [ \"django.middleware.security.SecurityMiddleware\", \"django.contrib.sessions.middleware.SessionMiddleware\", \"django.middleware.common.CommonMiddleware\", \"django.middleware.csrf.CsrfViewMiddleware\", \"django.contrib.auth.middleware.AuthenticationMiddleware\", \"django.contrib.messages.middleware.MessageMiddleware\",", "sets the X-Forwarded-Proto header and sends it to Django #", "to Django # # In other cases, you should comment", "\"django.contrib.messages.middleware.MessageMiddleware\", \"django.middleware.clickjacking.XFrameOptionsMiddleware\", ] TEMPLATES = [ { \"BACKEND\": \"django.template.backends.django.DjangoTemplates\", \"DIRS\":", "* DB_PASSWORD * DB_USER \"\"\" DEBUG = False # Security", "# Internationalization LANGUAGE_CODE = \"en-us\" TIME_ZONE = \"UTC\" USE_I18N =", "\"django.contrib.staticfiles\", \"rest_framework\", \"marion\", ] MIDDLEWARE = [ \"django.middleware.security.SecurityMiddleware\", \"django.contrib.sessions.middleware.SessionMiddleware\", \"django.middleware.common.CommonMiddleware\",", "the X-Forwarded-Proto header from all incoming requests # - Your", "= DATA_DIR.joinpath(\"media\") # Internationalization LANGUAGE_CODE = \"en-us\" TIME_ZONE = \"UTC\"", "# Keep this SECURE_PROXY_SSL_HEADER configuration only if : # -", "configuration by setting the following environment variables: * DB_NAME *", "- your Django app is behind a proxy. # -", "} # Static files (CSS, JavaScript, Images) STATIC_URL = \"/static/\"", "= True USE_L10N = True USE_TZ = True # Application", "\"django.contrib.contenttypes\", \"django.contrib.sessions\", \"django.contrib.messages\", \"django.contrib.staticfiles\", \"rest_framework\", \"marion\", ] MIDDLEWARE = [", "behind a reverse proxy. 
# # Keep this SECURE_PROXY_SSL_HEADER configuration", "STATIC_URL = \"/static/\" STATIC_ROOT = DATA_DIR.joinpath(\"static\") MEDIA_URL = \"/media/\" MEDIA_ROOT", "INSTALLED_APPS = [ \"django.contrib.admin\", \"django.contrib.auth\", \"django.contrib.contenttypes\", \"django.contrib.sessions\", \"django.contrib.messages\", \"django.contrib.staticfiles\", \"rest_framework\",", "= [\"*\"] ROOT_URLCONF = \"urls.debug\" # Application definition INSTALLED_APPS =", "Application ROOT_URLCONF = \"urls\" WSGI_APPLICATION = \"wsgi.application\" # Database DATABASES", "proxy. # # Keep this SECURE_PROXY_SSL_HEADER configuration only if :", "for marion project. \"\"\" from pathlib import Path from tempfile", "variables: * DB_NAME * DB_HOST * DB_PASSWORD * DB_USER \"\"\"", "), \"PORT\": values.Value(5432, environ_name=\"DB_PORT\", environ_prefix=None), } } # Static files", "from. It is recommended to configure third-party applications by creating", "We set DEBUG to True and configure the server to", "configure the server to respond from all hosts. 
\"\"\" DEBUG", "\"django.middleware.common.CommonMiddleware\", \"django.middleware.csrf.CsrfViewMiddleware\", \"django.contrib.auth.middleware.AuthenticationMiddleware\", \"django.contrib.messages.middleware.MessageMiddleware\", \"django.middleware.clickjacking.XFrameOptionsMiddleware\", ] TEMPLATES = [ {", "USE_TZ = True # Application definition INSTALLED_APPS = [ \"django.contrib.admin\",", "[ { \"BACKEND\": \"django.template.backends.django.DjangoTemplates\", \"DIRS\": [], \"APP_DIRS\": True, \"OPTIONS\": {", "configuration only if : # - your Django app is", "that SHOULD be defined: * DJANGO_SECRET_KEY You may also want", "}, {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"}, ] # Application", "{ \"default\": { \"ENGINE\": values.Value( \"django.db.backends.postgresql_psycopg2\", environ_name=\"DB_ENGINE\", environ_prefix=None, ), \"NAME\":", "class Base(Configuration): \"\"\" This is the base configuration every configuration", "DATA_DIR = Path(\"/data\") # pylint: disable=no-init class Base(Configuration): \"\"\" This", "environ_name=\"DB_USER\", environ_prefix=None), \"PASSWORD\": values.Value( \"pass\", environ_name=\"DB_PASSWORD\", environ_prefix=None ), \"HOST\": values.Value(", "It is recommended to configure third-party applications by creating a", "by setting the following environment variables: * DB_NAME * DB_HOST", "\"django.template.backends.django.DjangoTemplates\", \"DIRS\": [], \"APP_DIRS\": True, \"OPTIONS\": { \"context_processors\": [ \"django.template.context_processors.debug\",", "Base(Configuration): \"\"\" This is the base configuration every configuration (aka", "the server to respond from all hosts. 
\"\"\" DEBUG =", "} } # Static files (CSS, JavaScript, Images) STATIC_URL =", "in Django's HttpRequest # object when you application is behind", "ALLOWED_HOSTS = [] SECRET_KEY = values.Value(None) # SECURE_PROXY_SSL_HEADER allows to", "defined: * DJANGO_SECRET_KEY You may also want to override default", "is the base configuration every configuration (aka environnement) should inherit", "True ALLOWED_HOSTS = [\"*\"] ROOT_URLCONF = \"urls.debug\" # Application definition", "# Security ALLOWED_HOSTS = [] SECRET_KEY = values.Value(None) # SECURE_PROXY_SSL_HEADER", "BASE_DIR = Path(__file__).parent.resolve() DATA_DIR = Path(\"/data\") # pylint: disable=no-init class", "environnement) should inherit from. It is recommended to configure third-party", "MEDIA_ROOT = DATA_DIR.joinpath(\"media\") # Internationalization LANGUAGE_CODE = \"en-us\" TIME_ZONE =", "* DB_NAME * DB_HOST * DB_PASSWORD * DB_USER \"\"\" DEBUG", "environment settings We set DEBUG to True and configure the", "# - Your proxy sets the X-Forwarded-Proto header and sends", "should comment the following line to avoid security issues. SECURE_PROXY_SSL_HEADER", "disable=no-init class Base(Configuration): \"\"\" This is the base configuration every", "should inherit from. It is recommended to configure third-party applications", "] TEMPLATES = [ { \"BACKEND\": \"django.template.backends.django.DjangoTemplates\", \"DIRS\": [], \"APP_DIRS\":", "variable that SHOULD be defined: * DJANGO_SECRET_KEY You may also", "configuration (aka environnement) should inherit from. It is recommended to", "to override default configuration by setting the following environment variables:", "DB_NAME * DB_HOST * DB_PASSWORD * DB_USER \"\"\" DEBUG =", "settings for marion project. \"\"\" from pathlib import Path from", "class Development(Base): \"\"\" Development environment settings We set DEBUG to", "DJANGO_SECRET_KEY You may also want to override default configuration by", "issues. 
SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\") AUTH_PASSWORD_VALIDATORS = [ { \"NAME\":", "to respond from all hosts. \"\"\" DEBUG = True ALLOWED_HOSTS", "when you application is behind a reverse proxy. # #", "[ \"django.template.context_processors.debug\", \"django.template.context_processors.request\", \"django.contrib.auth.context_processors.auth\", \"django.contrib.messages.context_processors.messages\", ], }, }, ] class", "\"NAME\": ( \"django.contrib.auth.password_validation.\" \"UserAttributeSimilarityValidator\" ), }, {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},", "\"PASSWORD\": values.Value( \"pass\", environ_name=\"DB_PASSWORD\", environ_prefix=None ), \"HOST\": values.Value( \"localhost\", environ_name=\"DB_HOST\",", "\"django.contrib.auth.middleware.AuthenticationMiddleware\", \"django.contrib.messages.middleware.MessageMiddleware\", \"django.middleware.clickjacking.XFrameOptionsMiddleware\", ] TEMPLATES = [ { \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",", "= [ \"django.contrib.admin\", \"django.contrib.auth\", \"django.contrib.contenttypes\", \"django.contrib.sessions\", \"django.contrib.messages\", \"django.contrib.staticfiles\", \"rest_framework\", \"marion\",", "\"default\": { \"ENGINE\": values.Value( \"django.db.backends.postgresql_psycopg2\", environ_name=\"DB_ENGINE\", environ_prefix=None, ), \"NAME\": values.Value(\"marion\",", "\"OPTIONS\": { \"context_processors\": [ \"django.template.context_processors.debug\", \"django.template.context_processors.request\", \"django.contrib.auth.context_processors.auth\", \"django.contrib.messages.context_processors.messages\", ], },", "] # Application ROOT_URLCONF = \"urls\" WSGI_APPLICATION = \"wsgi.application\" #", "= [ { \"BACKEND\": \"django.template.backends.django.DjangoTemplates\", \"DIRS\": [], \"APP_DIRS\": True, \"OPTIONS\":", "\"\"\" This is 
the base configuration every configuration (aka environnement)", "= [] SECRET_KEY = values.Value(None) # SECURE_PROXY_SSL_HEADER allows to fix", "# # In other cases, you should comment the following", "- Your proxy sets the X-Forwarded-Proto header and sends it", "\"\"\" Django settings for marion project. \"\"\" from pathlib import", "Django app is behind a proxy. # - your proxy", "(CSS, JavaScript, Images) STATIC_URL = \"/static/\" STATIC_ROOT = DATA_DIR.joinpath(\"static\") MEDIA_URL", "other cases, you should comment the following line to avoid", "\"django.contrib.sessions.middleware.SessionMiddleware\", \"django.middleware.common.CommonMiddleware\", \"django.middleware.csrf.CsrfViewMiddleware\", \"django.contrib.auth.middleware.AuthenticationMiddleware\", \"django.contrib.messages.middleware.MessageMiddleware\", \"django.middleware.clickjacking.XFrameOptionsMiddleware\", ] TEMPLATES = [", "also want to override default configuration by setting the following", "configure third-party applications by creating a configuration mixins in ./configurations", "marion project. \"\"\" from pathlib import Path from tempfile import", "# - your Django app is behind a proxy. #", "# - your proxy strips the X-Forwarded-Proto header from all", "proxy. # - your proxy strips the X-Forwarded-Proto header from", "import Path from tempfile import mkdtemp from configurations import Configuration,", "by creating a configuration mixins in ./configurations and compose the", "environ_name=\"DB_PORT\", environ_prefix=None), } } # Static files (CSS, JavaScript, Images)", "security issues. 
SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\") AUTH_PASSWORD_VALIDATORS = [ {", "requests # - Your proxy sets the X-Forwarded-Proto header and", "want to override default configuration by setting the following environment", "HttpRequest # object when you application is behind a reverse", "# Database DATABASES = { \"default\": { \"ENGINE\": values.Value( \"django.db.backends.postgresql_psycopg2\",", "inherit from. It is recommended to configure third-party applications by", "# In other cases, you should comment the following line", "* DJANGO_SECRET_KEY You may also want to override default configuration", "AUTH_PASSWORD_VALIDATORS = [ { \"NAME\": ( \"django.contrib.auth.password_validation.\" \"UserAttributeSimilarityValidator\" ), },", "LANGUAGE_CODE = \"en-us\" TIME_ZONE = \"UTC\" USE_I18N = True USE_L10N", "environ_name=\"DB_HOST\", environ_prefix=None ), \"PORT\": values.Value(5432, environ_name=\"DB_PORT\", environ_prefix=None), } } #", "Path(\"/data\") # pylint: disable=no-init class Base(Configuration): \"\"\" This is the", "configuration with those mixins. It depends on an environment variable", "], }, }, ] class Development(Base): \"\"\" Development environment settings", "# Application definition INSTALLED_APPS = [ \"django.contrib.admin\", \"django.contrib.auth\", \"django.contrib.contenttypes\", \"django.contrib.sessions\",", "behind a proxy. 
# - your proxy strips the X-Forwarded-Proto", "[ \"django.middleware.security.SecurityMiddleware\", \"django.contrib.sessions.middleware.SessionMiddleware\", \"django.middleware.common.CommonMiddleware\", \"django.middleware.csrf.CsrfViewMiddleware\", \"django.contrib.auth.middleware.AuthenticationMiddleware\", \"django.contrib.messages.middleware.MessageMiddleware\", \"django.middleware.clickjacking.XFrameOptionsMiddleware\", ] TEMPLATES", "\"rest_framework\", \"marion\", ] MIDDLEWARE = [ \"django.middleware.security.SecurityMiddleware\", \"django.contrib.sessions.middleware.SessionMiddleware\", \"django.middleware.common.CommonMiddleware\", \"django.middleware.csrf.CsrfViewMiddleware\",", "\"UTC\" USE_I18N = True USE_L10N = True USE_TZ = True", "environ_name=\"DB_NAME\", environ_prefix=None), \"USER\": values.Value(\"fun\", environ_name=\"DB_USER\", environ_prefix=None), \"PASSWORD\": values.Value( \"pass\", environ_name=\"DB_PASSWORD\",", "True and configure the server to respond from all hosts.", "mixins. It depends on an environment variable that SHOULD be", "\"django.db.backends.postgresql_psycopg2\", environ_name=\"DB_ENGINE\", environ_prefix=None, ), \"NAME\": values.Value(\"marion\", environ_name=\"DB_NAME\", environ_prefix=None), \"USER\": values.Value(\"fun\",", "depends on an environment variable that SHOULD be defined: *", "# # Keep this SECURE_PROXY_SSL_HEADER configuration only if : #", "sends it to Django # # In other cases, you", "you application is behind a reverse proxy. 
# # Keep", "\"\"\" from pathlib import Path from tempfile import mkdtemp from", "# Application ROOT_URLCONF = \"urls\" WSGI_APPLICATION = \"wsgi.application\" # Database", "= True # Application definition INSTALLED_APPS = [ \"django.contrib.admin\", \"django.contrib.auth\",", "values.Value( \"localhost\", environ_name=\"DB_HOST\", environ_prefix=None ), \"PORT\": values.Value(5432, environ_name=\"DB_PORT\", environ_prefix=None), }", "= True ALLOWED_HOSTS = [\"*\"] ROOT_URLCONF = \"urls.debug\" # Application", "may also want to override default configuration by setting the", "Static files (CSS, JavaScript, Images) STATIC_URL = \"/static/\" STATIC_ROOT =", "on an environment variable that SHOULD be defined: * DJANGO_SECRET_KEY", "\"/media/\" MEDIA_ROOT = DATA_DIR.joinpath(\"media\") # Internationalization LANGUAGE_CODE = \"en-us\" TIME_ZONE", "\"django.contrib.auth.password_validation.MinimumLengthValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"}, ] # Application ROOT_URLCONF =", "\"django.contrib.auth.password_validation.NumericPasswordValidator\"}, ] # Application ROOT_URLCONF = \"urls\" WSGI_APPLICATION = \"wsgi.application\"", "is behind a reverse proxy. 
# # Keep this SECURE_PROXY_SSL_HEADER", "class Test(Base): \"\"\"Test environment settings\"\"\" MEDIA_ROOT = Path(mkdtemp()) ROOT_URLCONF =", "= False # Security ALLOWED_HOSTS = [] SECRET_KEY = values.Value(None)", "SECURE_PROXY_SSL_HEADER allows to fix the scheme in Django's HttpRequest #", "= { \"default\": { \"ENGINE\": values.Value( \"django.db.backends.postgresql_psycopg2\", environ_name=\"DB_ENGINE\", environ_prefix=None, ),", "DATABASES = { \"default\": { \"ENGINE\": values.Value( \"django.db.backends.postgresql_psycopg2\", environ_name=\"DB_ENGINE\", environ_prefix=None,", "\"en-us\" TIME_ZONE = \"UTC\" USE_I18N = True USE_L10N = True", "STATIC_ROOT = DATA_DIR.joinpath(\"static\") MEDIA_URL = \"/media/\" MEDIA_ROOT = DATA_DIR.joinpath(\"media\") #", "to avoid security issues. SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\") AUTH_PASSWORD_VALIDATORS =", "override default configuration by setting the following environment variables: *", "import Configuration, values BASE_DIR = Path(__file__).parent.resolve() DATA_DIR = Path(\"/data\") #", "avoid security issues. SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\") AUTH_PASSWORD_VALIDATORS = [", "Django's HttpRequest # object when you application is behind a", "your Django app is behind a proxy. 
# - your", "\"django.contrib.auth.password_validation.\" \"UserAttributeSimilarityValidator\" ), }, {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},", "Development(Base): \"\"\" Development environment settings We set DEBUG to True", "MIDDLEWARE = [ \"django.middleware.security.SecurityMiddleware\", \"django.contrib.sessions.middleware.SessionMiddleware\", \"django.middleware.common.CommonMiddleware\", \"django.middleware.csrf.CsrfViewMiddleware\", \"django.contrib.auth.middleware.AuthenticationMiddleware\", \"django.contrib.messages.middleware.MessageMiddleware\", \"django.middleware.clickjacking.XFrameOptionsMiddleware\",", "JavaScript, Images) STATIC_URL = \"/static/\" STATIC_ROOT = DATA_DIR.joinpath(\"static\") MEDIA_URL =", "SECURE_PROXY_SSL_HEADER configuration only if : # - your Django app", "DATA_DIR.joinpath(\"media\") # Internationalization LANGUAGE_CODE = \"en-us\" TIME_ZONE = \"UTC\" USE_I18N", "INSTALLED_APPS = Base.INSTALLED_APPS + [ \"howard\", ] MARION_DOCUMENT_ISSUER_CHOICES_CLASS = \"howard.defaults.DocumentIssuerChoices\"", "values BASE_DIR = Path(__file__).parent.resolve() DATA_DIR = Path(\"/data\") # pylint: disable=no-init", "TEMPLATES = [ { \"BACKEND\": \"django.template.backends.django.DjangoTemplates\", \"DIRS\": [], \"APP_DIRS\": True,", "header from all incoming requests # - Your proxy sets", "is recommended to configure third-party applications by creating a configuration", "\"\"\" DEBUG = False # Security ALLOWED_HOSTS = [] SECRET_KEY", "an environment variable that SHOULD be defined: * DJANGO_SECRET_KEY You", "\"/static/\" STATIC_ROOT = DATA_DIR.joinpath(\"static\") MEDIA_URL = \"/media/\" MEDIA_ROOT = DATA_DIR.joinpath(\"media\")", "In other cases, you should comment the following line to", "WSGI_APPLICATION = \"wsgi.application\" # Database 
DATABASES = { \"default\": {", "Internationalization LANGUAGE_CODE = \"en-us\" TIME_ZONE = \"UTC\" USE_I18N = True", "ALLOWED_HOSTS = [\"*\"] ROOT_URLCONF = \"urls.debug\" # Application definition INSTALLED_APPS", "DEBUG = False # Security ALLOWED_HOSTS = [] SECRET_KEY =", "= [ { \"NAME\": ( \"django.contrib.auth.password_validation.\" \"UserAttributeSimilarityValidator\" ), }, {\"NAME\":", "{\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"}, ] # Application ROOT_URLCONF = \"urls\"", "# SECURE_PROXY_SSL_HEADER allows to fix the scheme in Django's HttpRequest", "and configure the server to respond from all hosts. \"\"\"", "cases, you should comment the following line to avoid security", "\"django.contrib.messages.context_processors.messages\", ], }, }, ] class Development(Base): \"\"\" Development environment", "values.Value( \"pass\", environ_name=\"DB_PASSWORD\", environ_prefix=None ), \"HOST\": values.Value( \"localhost\", environ_name=\"DB_HOST\", environ_prefix=None", "[], \"APP_DIRS\": True, \"OPTIONS\": { \"context_processors\": [ \"django.template.context_processors.debug\", \"django.template.context_processors.request\", \"django.contrib.auth.context_processors.auth\",", "= values.Value(None) # SECURE_PROXY_SSL_HEADER allows to fix the scheme in", "[ { \"NAME\": ( \"django.contrib.auth.password_validation.\" \"UserAttributeSimilarityValidator\" ), }, {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},", "\"django.middleware.csrf.CsrfViewMiddleware\", \"django.contrib.auth.middleware.AuthenticationMiddleware\", \"django.contrib.messages.middleware.MessageMiddleware\", \"django.middleware.clickjacking.XFrameOptionsMiddleware\", ] TEMPLATES = [ { \"BACKEND\":", "values.Value(None) # SECURE_PROXY_SSL_HEADER allows to fix the scheme in Django's", "\"https\") AUTH_PASSWORD_VALIDATORS = [ { \"NAME\": ( 
\"django.contrib.auth.password_validation.\" \"UserAttributeSimilarityValidator\" ),", "= [ \"django.middleware.security.SecurityMiddleware\", \"django.contrib.sessions.middleware.SessionMiddleware\", \"django.middleware.common.CommonMiddleware\", \"django.middleware.csrf.CsrfViewMiddleware\", \"django.contrib.auth.middleware.AuthenticationMiddleware\", \"django.contrib.messages.middleware.MessageMiddleware\", \"django.middleware.clickjacking.XFrameOptionsMiddleware\", ]", "settings We set DEBUG to True and configure the server", "True USE_TZ = True # Application definition INSTALLED_APPS = [", "DEBUG to True and configure the server to respond from", "a proxy. # - your proxy strips the X-Forwarded-Proto header", "from configurations import Configuration, values BASE_DIR = Path(__file__).parent.resolve() DATA_DIR =", "= \"en-us\" TIME_ZONE = \"UTC\" USE_I18N = True USE_L10N =", "this SECURE_PROXY_SSL_HEADER configuration only if : # - your Django", "), \"HOST\": values.Value( \"localhost\", environ_name=\"DB_HOST\", environ_prefix=None ), \"PORT\": values.Value(5432, environ_name=\"DB_PORT\",", "= \"howard.defaults.DocumentIssuerChoices\" class Test(Base): \"\"\"Test environment settings\"\"\" MEDIA_ROOT = Path(mkdtemp())", "<filename>sandbox/settings.py<gh_stars>0 \"\"\" Django settings for marion project. \"\"\" from pathlib", "= True USE_TZ = True # Application definition INSTALLED_APPS =", "is behind a proxy. # - your proxy strips the", "\"urls.debug\" # Application definition INSTALLED_APPS = Base.INSTALLED_APPS + [ \"howard\",", "= \"UTC\" USE_I18N = True USE_L10N = True USE_TZ =", "Application definition INSTALLED_APPS = Base.INSTALLED_APPS + [ \"howard\", ] MARION_DOCUMENT_ISSUER_CHOICES_CLASS", "strips the X-Forwarded-Proto header from all incoming requests # -", "base configuration every configuration (aka environnement) should inherit from. 
It", "TIME_ZONE = \"UTC\" USE_I18N = True USE_L10N = True USE_TZ", "set DEBUG to True and configure the server to respond", "\"django.middleware.security.SecurityMiddleware\", \"django.contrib.sessions.middleware.SessionMiddleware\", \"django.middleware.common.CommonMiddleware\", \"django.middleware.csrf.CsrfViewMiddleware\", \"django.contrib.auth.middleware.AuthenticationMiddleware\", \"django.contrib.messages.middleware.MessageMiddleware\", \"django.middleware.clickjacking.XFrameOptionsMiddleware\", ] TEMPLATES =", "a configuration mixins in ./configurations and compose the Base configuration", "Base.INSTALLED_APPS + [ \"howard\", ] MARION_DOCUMENT_ISSUER_CHOICES_CLASS = \"howard.defaults.DocumentIssuerChoices\" class Test(Base):", "MEDIA_URL = \"/media/\" MEDIA_ROOT = DATA_DIR.joinpath(\"media\") # Internationalization LANGUAGE_CODE =", "(\"HTTP_X_FORWARDED_PROTO\", \"https\") AUTH_PASSWORD_VALIDATORS = [ { \"NAME\": ( \"django.contrib.auth.password_validation.\" \"UserAttributeSimilarityValidator\"", "tempfile import mkdtemp from configurations import Configuration, values BASE_DIR =", "allows to fix the scheme in Django's HttpRequest # object", "Path(__file__).parent.resolve() DATA_DIR = Path(\"/data\") # pylint: disable=no-init class Base(Configuration): \"\"\"", "), }, {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"}, {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"}, ] #", "\"HOST\": values.Value( \"localhost\", environ_name=\"DB_HOST\", environ_prefix=None ), \"PORT\": values.Value(5432, environ_name=\"DB_PORT\", environ_prefix=None),", "only if : # - your Django app is behind", "\"django.contrib.messages\", \"django.contrib.staticfiles\", \"rest_framework\", \"marion\", ] MIDDLEWARE = [ \"django.middleware.security.SecurityMiddleware\", \"django.contrib.sessions.middleware.SessionMiddleware\",", "it to Django # # In 
other cases, you should", "True, \"OPTIONS\": { \"context_processors\": [ \"django.template.context_processors.debug\", \"django.template.context_processors.request\", \"django.contrib.auth.context_processors.auth\", \"django.contrib.messages.context_processors.messages\", ],", "\"django.contrib.auth\", \"django.contrib.contenttypes\", \"django.contrib.sessions\", \"django.contrib.messages\", \"django.contrib.staticfiles\", \"rest_framework\", \"marion\", ] MIDDLEWARE =", "= \"urls\" WSGI_APPLICATION = \"wsgi.application\" # Database DATABASES = {", "\"PORT\": values.Value(5432, environ_name=\"DB_PORT\", environ_prefix=None), } } # Static files (CSS,", "object when you application is behind a reverse proxy. #", "+ [ \"howard\", ] MARION_DOCUMENT_ISSUER_CHOICES_CLASS = \"howard.defaults.DocumentIssuerChoices\" class Test(Base): \"\"\"Test", "those mixins. It depends on an environment variable that SHOULD", "[] SECRET_KEY = values.Value(None) # SECURE_PROXY_SSL_HEADER allows to fix the" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "License, Version 2.0 # (the \"License\"); you may not use", "may not use this file except in compliance with #", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "and # limitations under the License. # import grpc from", "skywalking.profile import profile_task_execution_service class GrpcServiceManagementClient(ServiceManagementClient): def __init__(self, channel: grpc.Channel): self.service_stub", "contributor license agreements. See the NOTICE file distributed with #", "agreed to in writing, software # distributed under the License", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "to the Apache Software Foundation (ASF) under one or more", "beats, [%s], [%s]', config.service_name, config.service_instance, ) self.service_stub.keepAlive(InstancePingPkg( service=config.service_name, serviceInstance=config.service_instance, ))", "skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub from skywalking import", "class GrpcTraceSegmentReportService(TraceSegmentReportService): def __init__(self, channel: grpc.Channel): self.report_stub = TraceSegmentReportServiceStub(channel) def", "grpc.Channel): self.report_stub = LogReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcProfileTaskChannelService(ProfileTaskChannelService):", "def __init__(self, channel: grpc.Channel): self.report_stub = TraceSegmentReportServiceStub(channel) def report(self, generator):", "def report(self, generator): self.report_stub.collect(generator) class GrpcLogDataReportService(LogDataReportService): def __init__(self, channel: grpc.Channel):", "distributed under the License is distributed on an \"AS IS\"", "2.0 # (the \"License\"); you may not use this file", "config.service_name, 
config.service_instance, ) self.service_stub.keepAlive(InstancePingPkg( service=config.service_name, serviceInstance=config.service_instance, )) class GrpcTraceSegmentReportService(TraceSegmentReportService): def", "from skywalking.loggings import logger from skywalking.profile import profile_task_execution_service class GrpcServiceManagementClient(ServiceManagementClient):", "the specific language governing permissions and # limitations under the", "from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub from skywalking", "skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery from skywalking.protocol.profile.Profile_pb2_grpc import", "__init__(self, channel: grpc.Channel): self.task_stub = ProfileTaskStub(channel) def do_query(self): query =", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "generator): self.report_stub.collect(generator) class GrpcLogDataReportService(LogDataReportService): def __init__(self, channel: grpc.Channel): self.report_stub =", "from skywalking.profile import profile_task_execution_service class GrpcServiceManagementClient(ServiceManagementClient): def __init__(self, channel: grpc.Channel):", "from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub from skywalking.protocol.management.Management_pb2", "under the Apache License, Version 2.0 # (the \"License\"); you", "# the License. You may obtain a copy of the", "express or implied. 
# See the License for the specific", "applicable law or agreed to in writing, software # distributed", "self.report_stub = LogReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcProfileTaskChannelService(ProfileTaskChannelService): def", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "this work for additional information regarding copyright ownership. # The", "Licensed to the Apache Software Foundation (ASF) under one or", "[%s], [%s]', config.service_name, config.service_instance, ) self.service_stub.keepAlive(InstancePingPkg( service=config.service_name, serviceInstance=config.service_instance, )) class", "skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub from skywalking.protocol.profile.Profile_pb2", "file except in compliance with # the License. You may", "import config from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \\ LogDataReportService", "this file except in compliance with # the License. You", "grpc.Channel): self.task_stub = ProfileTaskStub(channel) def do_query(self): query = ProfileTaskCommandQuery( service=config.service_name,", "permissions and # limitations under the License. # import grpc", "report(self, generator): self.report_stub.collect(generator) class GrpcLogDataReportService(LogDataReportService): def __init__(self, channel: grpc.Channel): self.report_stub", "# contributor license agreements. See the NOTICE file distributed with", "writing, software # distributed under the License is distributed on", "ManagementServiceStub from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub from", "# this work for additional information regarding copyright ownership. 
#", "in writing, software # distributed under the License is distributed", "value='Python')], )) def send_heart_beat(self): logger.debug( 'service heart beats, [%s], [%s]',", "from skywalking.command import command_service from skywalking.loggings import logger from skywalking.profile", "you may not use this file except in compliance with", "skywalking.command import command_service from skywalking.loggings import logger from skywalking.profile import", "with # the License. You may obtain a copy of", "properties=[KeyStringValuePair(key='language', value='Python')], )) def send_heart_beat(self): logger.debug( 'service heart beats, [%s],", "the License. You may obtain a copy of the License", "skywalking.loggings import logger from skywalking.profile import profile_task_execution_service class GrpcServiceManagementClient(ServiceManagementClient): def", "this file to You under the Apache License, Version 2.0", "import logger from skywalking.profile import profile_task_execution_service class GrpcServiceManagementClient(ServiceManagementClient): def __init__(self,", "import profile_task_execution_service class GrpcServiceManagementClient(ServiceManagementClient): def __init__(self, channel: grpc.Channel): self.service_stub =", "Apache License, Version 2.0 # (the \"License\"); you may not", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "import ManagementServiceStub from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub", "serviceInstance=config.service_instance, )) class GrpcTraceSegmentReportService(TraceSegmentReportService): def __init__(self, channel: grpc.Channel): self.report_stub =", "\\ LogDataReportService from skywalking.command import command_service from skywalking.loggings import logger", "send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name, 
serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language', value='Python')], )) def send_heart_beat(self): logger.debug(", "def send_heart_beat(self): logger.debug( 'service heart beats, [%s], [%s]', config.service_name, config.service_instance,", "import grpc from skywalking.protocol.common.Common_pb2 import KeyStringValuePair from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub", "= ManagementServiceStub(channel) def send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name, serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language', value='Python')], ))", "skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \\ LogDataReportService from skywalking.command import", "GrpcLogDataReportService(LogDataReportService): def __init__(self, channel: grpc.Channel): self.report_stub = LogReportServiceStub(channel) def report(self,", "def send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name, serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language', value='Python')], )) def send_heart_beat(self):", "CONDITIONS OF ANY KIND, either express or implied. # See", "Version 2.0 # (the \"License\"); you may not use this", "channel: grpc.Channel): self.report_stub = TraceSegmentReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "channel: grpc.Channel): self.task_stub = ProfileTaskStub(channel) def do_query(self): query = ProfileTaskCommandQuery(", "profile_task_execution_service class GrpcServiceManagementClient(ServiceManagementClient): def __init__(self, channel: grpc.Channel): self.service_stub = ManagementServiceStub(channel)", "or implied. 
# See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "grpc.Channel): self.report_stub = TraceSegmentReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcLogDataReportService(LogDataReportService):", "ProfileTaskStub(channel) def do_query(self): query = ProfileTaskCommandQuery( service=config.service_name, serviceInstance=config.service_instance, lastCommandTime=profile_task_execution_service.get_last_command_create_time() )", "from skywalking import config from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "import InstancePingPkg, InstanceProperties from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub from skywalking.protocol.profile.Profile_pb2 import", "agreements. See the NOTICE file distributed with # this work", "logger.debug( 'service heart beats, [%s], [%s]', config.service_name, config.service_instance, ) self.service_stub.keepAlive(InstancePingPkg(", "def __init__(self, channel: grpc.Channel): self.task_stub = ProfileTaskStub(channel) def do_query(self): query", "Foundation (ASF) under one or more # contributor license agreements.", "skywalking.protocol.common.Common_pb2 import KeyStringValuePair from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub from skywalking.protocol.logging.Logging_pb2_grpc import", "InstanceProperties from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery from", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "channel: grpc.Channel): self.report_stub = LogReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class", "report(self, generator): self.report_stub.collect(generator) class GrpcProfileTaskChannelService(ProfileTaskChannelService): def __init__(self, channel: grpc.Channel): self.task_stub", "ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \\ LogDataReportService from skywalking.command import command_service from", "from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \\ LogDataReportService from skywalking.command", "InstancePingPkg, InstanceProperties from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery", "self.report_stub = TraceSegmentReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcLogDataReportService(LogDataReportService): def", "under the License is distributed on an \"AS IS\" BASIS,", "\"License\"); you may not use this file except in compliance", "except in compliance with # the License. You may obtain", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "'service heart beats, [%s], [%s]', config.service_name, config.service_instance, ) self.service_stub.keepAlive(InstancePingPkg( service=config.service_name,", "License for the specific language governing permissions and # limitations", "distributed with # this work for additional information regarding copyright", "information regarding copyright ownership. 
# The ASF licenses this file", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "grpc.Channel): self.service_stub = ManagementServiceStub(channel) def send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name, serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language',", "file to You under the Apache License, Version 2.0 #", "GrpcServiceManagementClient(ServiceManagementClient): def __init__(self, channel: grpc.Channel): self.service_stub = ManagementServiceStub(channel) def send_instance_props(self):", "self.service_stub = ManagementServiceStub(channel) def send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name, serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language', value='Python')],", "[%s]', config.service_name, config.service_instance, ) self.service_stub.keepAlive(InstancePingPkg( service=config.service_name, serviceInstance=config.service_instance, )) class GrpcTraceSegmentReportService(TraceSegmentReportService):", "the License for the specific language governing permissions and #", "regarding copyright ownership. # The ASF licenses this file to", "TraceSegmentReportServiceStub from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties", "TraceSegmentReportService, ProfileTaskChannelService, \\ LogDataReportService from skywalking.command import command_service from skywalking.loggings", "See the NOTICE file distributed with # this work for", "under one or more # contributor license agreements. See the", "command_service from skywalking.loggings import logger from skywalking.profile import profile_task_execution_service class", "either express or implied. 
# See the License for the", "skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub from skywalking.protocol.management.Management_pb2 import", "from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery from skywalking.protocol.profile.Profile_pb2_grpc", "self.service_stub.keepAlive(InstancePingPkg( service=config.service_name, serviceInstance=config.service_instance, )) class GrpcTraceSegmentReportService(TraceSegmentReportService): def __init__(self, channel: grpc.Channel):", "(the \"License\"); you may not use this file except in", "config from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \\ LogDataReportService from", "OR CONDITIONS OF ANY KIND, either express or implied. #", "self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name, serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language', value='Python')], )) def send_heart_beat(self): logger.debug( 'service", "to You under the Apache License, Version 2.0 # (the", "NOTICE file distributed with # this work for additional information", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "send_heart_beat(self): logger.debug( 'service heart beats, [%s], [%s]', config.service_name, config.service_instance, )", "query = ProfileTaskCommandQuery( service=config.service_name, serviceInstance=config.service_instance, lastCommandTime=profile_task_execution_service.get_last_command_create_time() ) commands = self.task_stub.getProfileTaskCommands(query)", "the License is distributed on an \"AS IS\" BASIS, #", "class GrpcProfileTaskChannelService(ProfileTaskChannelService): def __init__(self, channel: grpc.Channel): self.task_stub = 
ProfileTaskStub(channel) def", "or more # contributor license agreements. See the NOTICE file", "ownership. # The ASF licenses this file to You under", "import KeyStringValuePair from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub", "skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties from skywalking.protocol.management.Management_pb2_grpc", "software # distributed under the License is distributed on an", "the License. # import grpc from skywalking.protocol.common.Common_pb2 import KeyStringValuePair from", "with # this work for additional information regarding copyright ownership.", "You under the Apache License, Version 2.0 # (the \"License\");", "import ProfileTaskCommandQuery from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub from skywalking import config", "heart beats, [%s], [%s]', config.service_name, config.service_instance, ) self.service_stub.keepAlive(InstancePingPkg( service=config.service_name, serviceInstance=config.service_instance,", "= ProfileTaskStub(channel) def do_query(self): query = ProfileTaskCommandQuery( service=config.service_name, serviceInstance=config.service_instance, lastCommandTime=profile_task_execution_service.get_last_command_create_time()", "LogReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcProfileTaskChannelService(ProfileTaskChannelService): def __init__(self, channel:", "ProfileTaskStub from skywalking import config from skywalking.client import ServiceManagementClient, TraceSegmentReportService,", "def __init__(self, channel: grpc.Channel): self.service_stub = ManagementServiceStub(channel) def send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties(", "# # Unless required by applicable 
law or agreed to", "service=config.service_name, serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language', value='Python')], )) def send_heart_beat(self): logger.debug( 'service heart", "LogDataReportService from skywalking.command import command_service from skywalking.loggings import logger from", "compliance with # the License. You may obtain a copy", "licenses this file to You under the Apache License, Version", "# import grpc from skywalking.protocol.common.Common_pb2 import KeyStringValuePair from skywalking.protocol.language_agent.Tracing_pb2_grpc import", "logger from skywalking.profile import profile_task_execution_service class GrpcServiceManagementClient(ServiceManagementClient): def __init__(self, channel:", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "TraceSegmentReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcLogDataReportService(LogDataReportService): def __init__(self, channel:", "file distributed with # this work for additional information regarding", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub from", "more # contributor license agreements. See the NOTICE file distributed", "skywalking import config from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \\", "__init__(self, channel: grpc.Channel): self.report_stub = TraceSegmentReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator)", "from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties from", "for additional information regarding copyright ownership. 
# The ASF licenses", "law or agreed to in writing, software # distributed under", "do_query(self): query = ProfileTaskCommandQuery( service=config.service_name, serviceInstance=config.service_instance, lastCommandTime=profile_task_execution_service.get_last_command_create_time() ) commands =", "The ASF licenses this file to You under the Apache", "GrpcTraceSegmentReportService(TraceSegmentReportService): def __init__(self, channel: grpc.Channel): self.report_stub = TraceSegmentReportServiceStub(channel) def report(self,", "= LogReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcProfileTaskChannelService(ProfileTaskChannelService): def __init__(self,", "ASF licenses this file to You under the Apache License,", "governing permissions and # limitations under the License. # import", "self.report_stub.collect(generator) class GrpcLogDataReportService(LogDataReportService): def __init__(self, channel: grpc.Channel): self.report_stub = LogReportServiceStub(channel)", "Apache Software Foundation (ASF) under one or more # contributor", "Software Foundation (ASF) under one or more # contributor license", "# limitations under the License. # import grpc from skywalking.protocol.common.Common_pb2", "import LogReportServiceStub from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties from skywalking.protocol.management.Management_pb2_grpc import", "def __init__(self, channel: grpc.Channel): self.report_stub = LogReportServiceStub(channel) def report(self, generator):", "implied. # See the License for the specific language governing", "# The ASF licenses this file to You under the", "limitations under the License. 
# import grpc from skywalking.protocol.common.Common_pb2 import", ")) class GrpcTraceSegmentReportService(TraceSegmentReportService): def __init__(self, channel: grpc.Channel): self.report_stub = TraceSegmentReportServiceStub(channel)", "LogReportServiceStub from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub", "class GrpcLogDataReportService(LogDataReportService): def __init__(self, channel: grpc.Channel): self.report_stub = LogReportServiceStub(channel) def", "__init__(self, channel: grpc.Channel): self.report_stub = LogReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator)", "service=config.service_name, serviceInstance=config.service_instance, )) class GrpcTraceSegmentReportService(TraceSegmentReportService): def __init__(self, channel: grpc.Channel): self.report_stub", "ProfileTaskCommandQuery from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub from skywalking import config from", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "import TraceSegmentReportServiceStub from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub from skywalking.protocol.management.Management_pb2 import InstancePingPkg,", "KeyStringValuePair from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub from", ") self.service_stub.keepAlive(InstancePingPkg( service=config.service_name, serviceInstance=config.service_instance, )) class GrpcTraceSegmentReportService(TraceSegmentReportService): def __init__(self, channel:", "generator): self.report_stub.collect(generator) class GrpcProfileTaskChannelService(ProfileTaskChannelService): def __init__(self, channel: grpc.Channel): self.task_stub =", "the NOTICE file distributed with # this work for additional", "by 
applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "not use this file except in compliance with # the", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "(ASF) under one or more # contributor license agreements. See", "the Apache License, Version 2.0 # (the \"License\"); you may", "use this file except in compliance with # the License.", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "import ProfileTaskStub from skywalking import config from skywalking.client import ServiceManagementClient,", "import command_service from skywalking.loggings import logger from skywalking.profile import profile_task_execution_service", "serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language', value='Python')], )) def send_heart_beat(self): logger.debug( 'service heart beats,", "= TraceSegmentReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcLogDataReportService(LogDataReportService): def __init__(self,", "under the License. 
# import grpc from skywalking.protocol.common.Common_pb2 import KeyStringValuePair", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "ProfileTaskChannelService, \\ LogDataReportService from skywalking.command import command_service from skywalking.loggings import", "def report(self, generator): self.report_stub.collect(generator) class GrpcProfileTaskChannelService(ProfileTaskChannelService): def __init__(self, channel: grpc.Channel):", "self.task_stub = ProfileTaskStub(channel) def do_query(self): query = ProfileTaskCommandQuery( service=config.service_name, serviceInstance=config.service_instance,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "GrpcProfileTaskChannelService(ProfileTaskChannelService): def __init__(self, channel: grpc.Channel): self.task_stub = ProfileTaskStub(channel) def do_query(self):", "to in writing, software # distributed under the License is", "from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub from skywalking import config from skywalking.client", "= ProfileTaskCommandQuery( service=config.service_name, serviceInstance=config.service_instance, lastCommandTime=profile_task_execution_service.get_last_command_create_time() ) commands = self.task_stub.getProfileTaskCommands(query) command_service.receive_command(commands)", "# (the \"License\"); you may not use this file except", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "the Apache Software Foundation (ASF) under one or more #", "License. 
You may obtain a copy of the License at", "skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub from skywalking import config from skywalking.client import", "channel: grpc.Channel): self.service_stub = ManagementServiceStub(channel) def send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name, serviceInstance=config.service_instance,", "You may obtain a copy of the License at #", "# Licensed to the Apache Software Foundation (ASF) under one", "additional information regarding copyright ownership. # The ASF licenses this", "language governing permissions and # limitations under the License. #", "or agreed to in writing, software # distributed under the", "grpc from skywalking.protocol.common.Common_pb2 import KeyStringValuePair from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub from", "from skywalking.protocol.common.Common_pb2 import KeyStringValuePair from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub from skywalking.protocol.logging.Logging_pb2_grpc", "required by applicable law or agreed to in writing, software", "ManagementServiceStub(channel) def send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name, serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language', value='Python')], )) def", "one or more # contributor license agreements. See the NOTICE", "work for additional information regarding copyright ownership. # The ASF", "in compliance with # the License. 
You may obtain a", "import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \\ LogDataReportService from skywalking.command import command_service", "config.service_instance, ) self.service_stub.keepAlive(InstancePingPkg( service=config.service_name, serviceInstance=config.service_instance, )) class GrpcTraceSegmentReportService(TraceSegmentReportService): def __init__(self,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# # Licensed to the Apache Software Foundation (ASF) under", ")) def send_heart_beat(self): logger.debug( 'service heart beats, [%s], [%s]', config.service_name,", "copyright ownership. # The ASF licenses this file to You", "self.report_stub.collect(generator) class GrpcProfileTaskChannelService(ProfileTaskChannelService): def __init__(self, channel: grpc.Channel): self.task_stub = ProfileTaskStub(channel)", "class GrpcServiceManagementClient(ServiceManagementClient): def __init__(self, channel: grpc.Channel): self.service_stub = ManagementServiceStub(channel) def", "def do_query(self): query = ProfileTaskCommandQuery( service=config.service_name, serviceInstance=config.service_instance, lastCommandTime=profile_task_execution_service.get_last_command_create_time() ) commands", "license agreements. See the NOTICE file distributed with # this", "__init__(self, channel: grpc.Channel): self.service_stub = ManagementServiceStub(channel) def send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name,", "License. # import grpc from skywalking.protocol.common.Common_pb2 import KeyStringValuePair from skywalking.protocol.language_agent.Tracing_pb2_grpc" ]
[ "network confirmation'), ('paid', 'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'),", "), migrations.AddField( model_name='payment', name='payment_address', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='payment',", "('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='USD', max_length=10),", "decimal_places=1, max_digits=10, null=True), ), migrations.AddField( model_name='payment', name='payment_address', field=models.CharField(blank=True, max_length=100, null=True),", "class Migration(migrations.Migration): dependencies = [ ('coingate', '0003_auto_20200207_1513'), ] operations =", "null=True), ), migrations.AlterField( model_name='payment', name='created_at', field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.AlterField( model_name='payment',", "'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10), ), migrations.AlterField( model_name='payment', name='status', field=models.CharField(choices=[('new',", "invoice'), ('pending', 'Awaiting payment'), ('confirming', 'Awaiting blockchain network confirmation'), ('paid',", "operations = [ migrations.RemoveField( model_name='payment', name='token', ), migrations.AddField( model_name='payment', name='expire_at',", "max_length=10), ), migrations.AlterField( model_name='payment', name='receive_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC',", "name='status', field=models.CharField(choices=[('new', 'Newly created invoice'), ('pending', 'Awaiting payment'), ('confirming', 'Awaiting", "2020-02-07 19:59 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "Migration(migrations.Migration): dependencies = [ ('coingate', '0003_auto_20200207_1513'), ] operations = [", "), migrations.AddField( model_name='payment', name='pay_amount', field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True), ), 
migrations.AddField(", "('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10),", "'ETH')], default='USD', max_length=10), ), migrations.AlterField( model_name='payment', name='receive_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR',", "[ migrations.RemoveField( model_name='payment', name='token', ), migrations.AddField( model_name='payment', name='expire_at', field=models.DateTimeField(blank=True, null=True),", "max_digits=10, null=True), ), migrations.AddField( model_name='payment', name='payment_address', field=models.CharField(blank=True, max_length=100, null=True), ),", "= [ ('coingate', '0003_auto_20200207_1513'), ] operations = [ migrations.RemoveField( model_name='payment',", "null=True), ), migrations.AddField( model_name='payment', name='payment_address', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField(", "'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10), ), migrations.AlterField( model_name='payment',", "] operations = [ migrations.RemoveField( model_name='payment', name='token', ), migrations.AddField( model_name='payment',", "migrations.AlterField( model_name='payment', name='created_at', field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.AlterField( model_name='payment', name='price_currency', field=models.CharField(choices=[('USD',", "models class Migration(migrations.Migration): dependencies = [ ('coingate', '0003_auto_20200207_1513'), ] operations", "('LTC', 'LTC'), ('ETH', 'ETH')], default='USD', max_length=10), ), migrations.AlterField( model_name='payment', name='receive_currency',", "'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='BTC',", "created invoice'), ('pending', 'Awaiting payment'), ('confirming', 'Awaiting blockchain network confirmation'),", "by Django 3.0.3 on 2020-02-07 19:59 from django.db import migrations,", "django.db import migrations, models class 
Migration(migrations.Migration): dependencies = [ ('coingate',", "('LTC', 'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10), ), migrations.AlterField( model_name='payment', name='status',", "'Newly created invoice'), ('pending', 'Awaiting payment'), ('confirming', 'Awaiting blockchain network", "'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='USD', max_length=10), ), migrations.AlterField( model_name='payment',", "model_name='payment', name='token', ), migrations.AddField( model_name='payment', name='expire_at', field=models.DateTimeField(blank=True, null=True), ), migrations.AddField(", "field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')],", "null=True), ), migrations.AlterField( model_name='payment', name='price_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC',", "), migrations.AlterField( model_name='payment', name='created_at', field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.AlterField( model_name='payment', name='price_currency',", "confirmation'), ('paid', 'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'), ('refunded',", "model_name='payment', name='pay_amount', field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True), ), migrations.AddField( model_name='payment', name='payment_address',", "name='receive_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH',", "name='payment_address', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='payment', name='created_at', field=models.DateTimeField(auto_now_add=True, null=True),", "('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='USD', max_length=10), ), migrations.AlterField(", "dependencies = [ ('coingate', '0003_auto_20200207_1513'), ] operations = [ migrations.RemoveField(", "), migrations.AlterField( 
model_name='payment', name='receive_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'),", "migrations.AlterField( model_name='payment', name='status', field=models.CharField(choices=[('new', 'Newly created invoice'), ('pending', 'Awaiting payment'),", "('ETH', 'ETH')], default='BTC', max_length=10), ), migrations.AlterField( model_name='payment', name='status', field=models.CharField(choices=[('new', 'Newly", "'0003_auto_20200207_1513'), ] operations = [ migrations.RemoveField( model_name='payment', name='token', ), migrations.AddField(", "('confirming', 'Awaiting blockchain network confirmation'), ('paid', 'Confirmed'), ('invalid', 'Rejected'), ('expired',", "model_name='payment', name='status', field=models.CharField(choices=[('new', 'Newly created invoice'), ('pending', 'Awaiting payment'), ('confirming',", "null=True), ), migrations.AddField( model_name='payment', name='pay_amount', field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True), ),", "'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10), ),", "('ETH', 'ETH')], default='USD', max_length=10), ), migrations.AlterField( model_name='payment', name='receive_currency', field=models.CharField(choices=[('USD', 'USD'),", "field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True), ), migrations.AddField( model_name='payment', name='payment_address', field=models.CharField(blank=True, max_length=100,", "model_name='payment', name='receive_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'),", "migrations.AddField( model_name='payment', name='pay_amount', field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True), ), migrations.AddField( model_name='payment',", "), migrations.AlterField( model_name='payment', name='price_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'),", 
"import migrations, models class Migration(migrations.Migration): dependencies = [ ('coingate', '0003_auto_20200207_1513'),", "max_length=10), ), migrations.AlterField( model_name='payment', name='status', field=models.CharField(choices=[('new', 'Newly created invoice'), ('pending',", "name='expire_at', field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name='payment', name='pay_amount', field=models.DecimalField(blank=True, decimal_places=1, max_digits=10,", "field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name='payment', name='pay_amount', field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True),", "('pending', 'Awaiting payment'), ('confirming', 'Awaiting blockchain network confirmation'), ('paid', 'Confirmed'),", "19:59 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "('paid', 'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'), ('refunded', 'Refunded')],", "migrations.AlterField( model_name='payment', name='receive_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC',", "model_name='payment', name='price_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'),", "model_name='payment', name='payment_address', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='payment', name='created_at', field=models.DateTimeField(auto_now_add=True,", "), migrations.AddField( model_name='payment', name='expire_at', field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name='payment', name='pay_amount',", "migrations.RemoveField( model_name='payment', name='token', ), migrations.AddField( model_name='payment', name='expire_at', field=models.DateTimeField(blank=True, null=True), ),", "model_name='payment', name='expire_at', 
field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name='payment', name='pay_amount', field=models.DecimalField(blank=True, decimal_places=1,", "name='created_at', field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.AlterField( model_name='payment', name='price_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR',", "'Awaiting payment'), ('confirming', 'Awaiting blockchain network confirmation'), ('paid', 'Confirmed'), ('invalid',", "max_length=100, null=True), ), migrations.AlterField( model_name='payment', name='created_at', field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.AlterField(", "payment'), ('confirming', 'Awaiting blockchain network confirmation'), ('paid', 'Confirmed'), ('invalid', 'Rejected'),", "name='pay_amount', field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True), ), migrations.AddField( model_name='payment', name='payment_address', field=models.CharField(blank=True,", "'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'), ('refunded', 'Refunded')], default='new', max_length=10), ),", "Django 3.0.3 on 2020-02-07 19:59 from django.db import migrations, models", "blockchain network confirmation'), ('paid', 'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'), ('canceled',", "field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='payment', name='created_at', field=models.DateTimeField(auto_now_add=True, null=True), ),", "[ ('coingate', '0003_auto_20200207_1513'), ] operations = [ migrations.RemoveField( model_name='payment', name='token',", "# Generated by Django 3.0.3 on 2020-02-07 19:59 from django.db", "field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.AlterField( model_name='payment', name='price_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'),", "default='USD', max_length=10), ), migrations.AlterField( model_name='payment', 
name='receive_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'),", "model_name='payment', name='created_at', field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.AlterField( model_name='payment', name='price_currency', field=models.CharField(choices=[('USD', 'USD'),", "= [ migrations.RemoveField( model_name='payment', name='token', ), migrations.AddField( model_name='payment', name='expire_at', field=models.DateTimeField(blank=True,", "('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10), ), migrations.AlterField(", "('expired', 'Expired'), ('canceled', 'Canceled'), ('refunded', 'Refunded')], default='new', max_length=10), ), ]", "Generated by Django 3.0.3 on 2020-02-07 19:59 from django.db import", "3.0.3 on 2020-02-07 19:59 from django.db import migrations, models class", "field=models.CharField(choices=[('new', 'Newly created invoice'), ('pending', 'Awaiting payment'), ('confirming', 'Awaiting blockchain", "name='token', ), migrations.AddField( model_name='payment', name='expire_at', field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name='payment',", "'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'), ('refunded', 'Refunded')], default='new',", "'ETH')], default='BTC', max_length=10), ), migrations.AlterField( model_name='payment', name='status', field=models.CharField(choices=[('new', 'Newly created", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='USD',", "'LTC'), ('ETH', 'ETH')], default='USD', max_length=10), ), migrations.AlterField( model_name='payment', name='receive_currency', field=models.CharField(choices=[('USD',", "), migrations.AlterField( model_name='payment', name='status', field=models.CharField(choices=[('new', 'Newly created invoice'), ('pending', 'Awaiting", 
"migrations.AlterField( model_name='payment', name='price_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC',", "migrations.AddField( model_name='payment', name='expire_at', field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name='payment', name='pay_amount', field=models.DecimalField(blank=True,", "'Awaiting blockchain network confirmation'), ('paid', 'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'),", "'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='USD', max_length=10), ),", "migrations.AddField( model_name='payment', name='payment_address', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='payment', name='created_at',", "on 2020-02-07 19:59 from django.db import migrations, models class Migration(migrations.Migration):", "('coingate', '0003_auto_20200207_1513'), ] operations = [ migrations.RemoveField( model_name='payment', name='token', ),", "name='price_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH',", "default='BTC', max_length=10), ), migrations.AlterField( model_name='payment', name='status', field=models.CharField(choices=[('new', 'Newly created invoice'),", "migrations, models class Migration(migrations.Migration): dependencies = [ ('coingate', '0003_auto_20200207_1513'), ]", "('invalid', 'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'), ('refunded', 'Refunded')], default='new', max_length=10)," ]
[ "instance_relative_config=True) app.config.from_file(\"config.toml\", load=toml.load) db = SQLAlchemy(app) @app.before_first_request def create_table(): db.create_all()", "SQLAlchemy(app) @app.before_first_request def create_table(): db.create_all() from space_trace import views, cli", "app = Flask(__name__, instance_relative_config=True) app.config.from_file(\"config.toml\", load=toml.load) db = SQLAlchemy(app) @app.before_first_request", "toml from flask import Flask from flask_sqlalchemy import SQLAlchemy app", "SQLAlchemy app = Flask(__name__, instance_relative_config=True) app.config.from_file(\"config.toml\", load=toml.load) db = SQLAlchemy(app)", "import toml from flask import Flask from flask_sqlalchemy import SQLAlchemy", "load=toml.load) db = SQLAlchemy(app) @app.before_first_request def create_table(): db.create_all() from space_trace", "= SQLAlchemy(app) @app.before_first_request def create_table(): db.create_all() from space_trace import views,", "Flask(__name__, instance_relative_config=True) app.config.from_file(\"config.toml\", load=toml.load) db = SQLAlchemy(app) @app.before_first_request def create_table():", "app.config.from_file(\"config.toml\", load=toml.load) db = SQLAlchemy(app) @app.before_first_request def create_table(): db.create_all() from", "from flask_sqlalchemy import SQLAlchemy app = Flask(__name__, instance_relative_config=True) app.config.from_file(\"config.toml\", load=toml.load)", "= Flask(__name__, instance_relative_config=True) app.config.from_file(\"config.toml\", load=toml.load) db = SQLAlchemy(app) @app.before_first_request def", "Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__, instance_relative_config=True) app.config.from_file(\"config.toml\",", "flask import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__,", "flask_sqlalchemy import SQLAlchemy app = Flask(__name__, instance_relative_config=True) app.config.from_file(\"config.toml\", load=toml.load) db", "from flask import Flask 
from flask_sqlalchemy import SQLAlchemy app =", "db = SQLAlchemy(app) @app.before_first_request def create_table(): db.create_all() from space_trace import", "import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__, instance_relative_config=True)", "import SQLAlchemy app = Flask(__name__, instance_relative_config=True) app.config.from_file(\"config.toml\", load=toml.load) db =" ]
[ "distribution_params): distribution_upper = distribution.upper() if not Distribution[distribution_upper] or Distribution[distribution_upper] is", "UniformDist class Distribution(Enum): UNIFORM = 0 GAUSSIAN = 1 POISSON", "is None: raise IndexError('Distribution not supported `{}`. Try one of:", "in Distribution])) if Distribution[distribution_upper] == Distribution.UNIFORM: if not distribution_params: distribution_params", ".UniformDist import UniformDist class Distribution(Enum): UNIFORM = 0 GAUSSIAN =", "0 GAUSSIAN = 1 POISSON = 2 @staticmethod def determine_distribution(distribution,", "distribution.upper() if not Distribution[distribution_upper] or Distribution[distribution_upper] is None: raise IndexError('Distribution", "UniformDist(rate=float(distribution_params)) if Distribution[distribution_upper] == Distribution.GAUSSIAN: if not distribution_params: distribution_params =", "= 0.5 return UniformDist(rate=float(distribution_params)) if Distribution[distribution_upper] == Distribution.GAUSSIAN: if not", "return NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1])) if Distribution[distribution_upper] is Distribution.POISSON: pass raise IndexError('Distribution", "== Distribution.GAUSSIAN: if not distribution_params: distribution_params = [0., 1.] return", "determine_distribution(distribution, distribution_params): distribution_upper = distribution.upper() if not Distribution[distribution_upper] or Distribution[distribution_upper]", "elem in Distribution])) if Distribution[distribution_upper] == Distribution.UNIFORM: if not distribution_params:", "distribution_params = [0., 1.] 
return NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1])) if Distribution[distribution_upper] is", "if Distribution[distribution_upper] == Distribution.UNIFORM: if not distribution_params: distribution_params = 0.5", "distribution_upper = distribution.upper() if not Distribution[distribution_upper] or Distribution[distribution_upper] is None:", "from .UniformDist import UniformDist class Distribution(Enum): UNIFORM = 0 GAUSSIAN", "Try one of: {}'.format( distribution, [(elem.value, elem.name) for elem in", "if Distribution[distribution_upper] is Distribution.POISSON: pass raise IndexError('Distribution not supported `{}`.", "Distribution.POISSON: pass raise IndexError('Distribution not supported `{}`. Try one of:", "UNIFORM = 0 GAUSSIAN = 1 POISSON = 2 @staticmethod", "if not distribution_params: distribution_params = [0., 1.] return NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1]))", "2 @staticmethod def determine_distribution(distribution, distribution_params): distribution_upper = distribution.upper() if not", "Distribution.GAUSSIAN: if not distribution_params: distribution_params = [0., 1.] return NormalDist(loc=float(distribution_params[0]),", "not distribution_params: distribution_params = [0., 1.] return NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1])) if", "1.] return NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1])) if Distribution[distribution_upper] is Distribution.POISSON: pass raise", "if not Distribution[distribution_upper] or Distribution[distribution_upper] is None: raise IndexError('Distribution not", "Distribution(Enum): UNIFORM = 0 GAUSSIAN = 1 POISSON = 2", "pass raise IndexError('Distribution not supported `{}`. 
Try one of: {}'.format(", "if Distribution[distribution_upper] == Distribution.GAUSSIAN: if not distribution_params: distribution_params = [0.,", "coding: utf-8 -*- import logging from enum import Enum from", "Distribution])) if Distribution[distribution_upper] == Distribution.UNIFORM: if not distribution_params: distribution_params =", "`{}`. Try one of: {}'.format( distribution, [(elem.value, elem.name) for elem", "distribution, [(elem.value, elem.name) for elem in Distribution])) if Distribution[distribution_upper] ==", "or Distribution[distribution_upper] is None: raise IndexError('Distribution not supported `{}`. Try", "Distribution[distribution_upper] == Distribution.UNIFORM: if not distribution_params: distribution_params = 0.5 return", "distribution_params: distribution_params = [0., 1.] return NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1])) if Distribution[distribution_upper]", "Distribution.UNIFORM: if not distribution_params: distribution_params = 0.5 return UniformDist(rate=float(distribution_params)) if", "import UniformDist class Distribution(Enum): UNIFORM = 0 GAUSSIAN = 1", "NormalDist from .UniformDist import UniformDist class Distribution(Enum): UNIFORM = 0", "scale=float(distribution_params[1])) if Distribution[distribution_upper] is Distribution.POISSON: pass raise IndexError('Distribution not supported", "for elem in Distribution])) if Distribution[distribution_upper] == Distribution.UNIFORM: if not", "1 POISSON = 2 @staticmethod def determine_distribution(distribution, distribution_params): distribution_upper =", "Distribution[distribution_upper] or Distribution[distribution_upper] is None: raise IndexError('Distribution not supported `{}`.", "-*- coding: utf-8 -*- import logging from enum import Enum", "[(elem.value, elem.name) for elem in Distribution])) if Distribution[distribution_upper] == Distribution.UNIFORM:", "None: raise IndexError('Distribution not supported `{}`. 
Try one of: {}'.format(", "if not distribution_params: distribution_params = 0.5 return UniformDist(rate=float(distribution_params)) if Distribution[distribution_upper]", "== Distribution.UNIFORM: if not distribution_params: distribution_params = 0.5 return UniformDist(rate=float(distribution_params))", "distribution_params: distribution_params = 0.5 return UniformDist(rate=float(distribution_params)) if Distribution[distribution_upper] == Distribution.GAUSSIAN:", "raise IndexError('Distribution not supported `{}`. Try one of: {}'.format( distribution,", "# -*- coding: utf-8 -*- import logging from enum import", "Distribution[distribution_upper] == Distribution.GAUSSIAN: if not distribution_params: distribution_params = [0., 1.]", "<filename>ng/distributions/Distribution.py<gh_stars>0 #!/usr/bin/env python3 # -*- coding: utf-8 -*- import logging", "from enum import Enum from .NormalDist import NormalDist from .UniformDist", "not Distribution[distribution_upper] or Distribution[distribution_upper] is None: raise IndexError('Distribution not supported", "class Distribution(Enum): UNIFORM = 0 GAUSSIAN = 1 POISSON =", "= 0 GAUSSIAN = 1 POISSON = 2 @staticmethod def", "{}'.format( distribution, [(elem.value, elem.name) for elem in Distribution])) if Distribution[distribution_upper]", "of: {}'.format( distribution, [(elem.value, elem.name) for elem in Distribution])) if", "0.5 return UniformDist(rate=float(distribution_params)) if Distribution[distribution_upper] == Distribution.GAUSSIAN: if not distribution_params:", "one of: {}'.format( distribution, [(elem.value, elem.name) for elem in Distribution]))", "from .NormalDist import NormalDist from .UniformDist import UniformDist class Distribution(Enum):", "NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1])) if Distribution[distribution_upper] is Distribution.POISSON: pass raise IndexError('Distribution not", "= 2 @staticmethod def determine_distribution(distribution, distribution_params): 
distribution_upper = distribution.upper() if", "POISSON = 2 @staticmethod def determine_distribution(distribution, distribution_params): distribution_upper = distribution.upper()", "Distribution[distribution_upper] is Distribution.POISSON: pass raise IndexError('Distribution not supported `{}`. Try", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- import logging from", "IndexError('Distribution not supported `{}`. Try one of: {}'.format( distribution, [(elem.value,", "= 1 POISSON = 2 @staticmethod def determine_distribution(distribution, distribution_params): distribution_upper", "= [0., 1.] return NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1])) if Distribution[distribution_upper] is Distribution.POISSON:", "import logging from enum import Enum from .NormalDist import NormalDist", "enum import Enum from .NormalDist import NormalDist from .UniformDist import", "Distribution[distribution_upper] is None: raise IndexError('Distribution not supported `{}`. Try one", "import NormalDist from .UniformDist import UniformDist class Distribution(Enum): UNIFORM =", "logging from enum import Enum from .NormalDist import NormalDist from", "not distribution_params: distribution_params = 0.5 return UniformDist(rate=float(distribution_params)) if Distribution[distribution_upper] ==", "distribution_params = 0.5 return UniformDist(rate=float(distribution_params)) if Distribution[distribution_upper] == Distribution.GAUSSIAN: if", "utf-8 -*- import logging from enum import Enum from .NormalDist", "def determine_distribution(distribution, distribution_params): distribution_upper = distribution.upper() if not Distribution[distribution_upper] or", "Enum from .NormalDist import NormalDist from .UniformDist import UniformDist class", "= distribution.upper() if not Distribution[distribution_upper] or Distribution[distribution_upper] is None: raise", "[0., 1.] 
return NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1])) if Distribution[distribution_upper] is Distribution.POISSON: pass", "-*- import logging from enum import Enum from .NormalDist import", "not supported `{}`. Try one of: {}'.format( distribution, [(elem.value, elem.name)", "elem.name) for elem in Distribution])) if Distribution[distribution_upper] == Distribution.UNIFORM: if", "python3 # -*- coding: utf-8 -*- import logging from enum", "supported `{}`. Try one of: {}'.format( distribution, [(elem.value, elem.name) for", "is Distribution.POISSON: pass raise IndexError('Distribution not supported `{}`. Try one", "GAUSSIAN = 1 POISSON = 2 @staticmethod def determine_distribution(distribution, distribution_params):", "import Enum from .NormalDist import NormalDist from .UniformDist import UniformDist", ".NormalDist import NormalDist from .UniformDist import UniformDist class Distribution(Enum): UNIFORM", "return UniformDist(rate=float(distribution_params)) if Distribution[distribution_upper] == Distribution.GAUSSIAN: if not distribution_params: distribution_params", "@staticmethod def determine_distribution(distribution, distribution_params): distribution_upper = distribution.upper() if not Distribution[distribution_upper]" ]
[ "sys filename = sys.argv[1] from_id = int(sys.argv[2]) to_id = int(sys.argv[2])", "filename = sys.argv[1] from_id = int(sys.argv[2]) to_id = int(sys.argv[2]) for", "int(sys.argv[2]) to_id = int(sys.argv[2]) for i in range(from_id, to_id +", "range(from_id, to_id + 1): sys.system(\"mv {0}.in{1} {0}{1}.in\".format(filename, i)) sys.system(\"mv {0}.out{1}", "in range(from_id, to_id + 1): sys.system(\"mv {0}.in{1} {0}{1}.in\".format(filename, i)) sys.system(\"mv", "import os import sys filename = sys.argv[1] from_id = int(sys.argv[2])", "+ 1): sys.system(\"mv {0}.in{1} {0}{1}.in\".format(filename, i)) sys.system(\"mv {0}.out{1} {0}{1}.out\".format(filename, i))", "= int(sys.argv[2]) to_id = int(sys.argv[2]) for i in range(from_id, to_id", "import sys filename = sys.argv[1] from_id = int(sys.argv[2]) to_id =", "sys.argv[1] from_id = int(sys.argv[2]) to_id = int(sys.argv[2]) for i in", "i in range(from_id, to_id + 1): sys.system(\"mv {0}.in{1} {0}{1}.in\".format(filename, i))", "os import sys filename = sys.argv[1] from_id = int(sys.argv[2]) to_id", "= int(sys.argv[2]) for i in range(from_id, to_id + 1): sys.system(\"mv", "to_id = int(sys.argv[2]) for i in range(from_id, to_id + 1):", "for i in range(from_id, to_id + 1): sys.system(\"mv {0}.in{1} {0}{1}.in\".format(filename,", "= sys.argv[1] from_id = int(sys.argv[2]) to_id = int(sys.argv[2]) for i", "to_id + 1): sys.system(\"mv {0}.in{1} {0}{1}.in\".format(filename, i)) sys.system(\"mv {0}.out{1} {0}{1}.out\".format(filename,", "from_id = int(sys.argv[2]) to_id = int(sys.argv[2]) for i in range(from_id,", "int(sys.argv[2]) for i in range(from_id, to_id + 1): sys.system(\"mv {0}.in{1}" ]
[ "path('apply/', views.FillPassApplication, name='transit-pass-application-form'), path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'), path('view-application-list/', views.DisplayApplicationList, name='view-application-list'), path('view-application/<int:appln_id>/',", "views.FillPassApplication, name='transit-pass-application-form'), path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'), path('view-application-list/', views.DisplayApplicationList, name='view-application-list'), path('view-application/<int:appln_id>/', views.DisplayIndividualApplication,", "= [ path('apply/', views.FillPassApplication, name='transit-pass-application-form'), path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'), path('view-application-list/', views.DisplayApplicationList,", "views urlpatterns = [ path('apply/', views.FillPassApplication, name='transit-pass-application-form'), path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'),", "import views urlpatterns = [ path('apply/', views.FillPassApplication, name='transit-pass-application-form'), path('application-details/<int:appln_id>', views.DisplayApplicationToken,", "<gh_stars>0 from django.urls import path from . import views urlpatterns", "django.urls import path from . import views urlpatterns = [", "[ path('apply/', views.FillPassApplication, name='transit-pass-application-form'), path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'), path('view-application-list/', views.DisplayApplicationList, name='view-application-list'),", "urlpatterns = [ path('apply/', views.FillPassApplication, name='transit-pass-application-form'), path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'), path('view-application-list/',", "path from . 
import views urlpatterns = [ path('apply/', views.FillPassApplication,", ". import views urlpatterns = [ path('apply/', views.FillPassApplication, name='transit-pass-application-form'), path('application-details/<int:appln_id>',", "views.DisplayApplicationList, name='view-application-list'), path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'), path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'), path('check-pass-validity/', views.CheckPassValidity,", "name='application-details'), path('view-application-list/', views.DisplayApplicationList, name='view-application-list'), path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'), path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'),", "name='view-application-list'), path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'), path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'), path('check-pass-validity/', views.CheckPassValidity, name='check-pass-validity'),", "import path from . import views urlpatterns = [ path('apply/',", "path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'), path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'), path('check-pass-validity/', views.CheckPassValidity, name='check-pass-validity'), ]", "from . import views urlpatterns = [ path('apply/', views.FillPassApplication, name='transit-pass-application-form'),", "from django.urls import path from . 
import views urlpatterns =", "path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'), path('view-application-list/', views.DisplayApplicationList, name='view-application-list'), path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'), path('check-application-status/',", "path('view-application-list/', views.DisplayApplicationList, name='view-application-list'), path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'), path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'), path('check-pass-validity/',", "name='transit-pass-application-form'), path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'), path('view-application-list/', views.DisplayApplicationList, name='view-application-list'), path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'),", "views.DisplayApplicationToken, name='application-details'), path('view-application-list/', views.DisplayApplicationList, name='view-application-list'), path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'), path('check-application-status/', views.CheckApplicationStatus," ]
[ "dcc.Textarea( id='textarea-example', value='Textarea content initialized\\nwith multiple lines of text', style={'width':", "html import dash_core_components as dcc app = dash.Dash(__name__) app.layout =", "id='textarea-example', value='Textarea content initialized\\nwith multiple lines of text', style={'width': '100%',", "'100%', 'height': 300}, ), html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'}) ]) @app.callback( Output('textarea-example-output',", "app.layout = html.Div([ dcc.Textarea( id='textarea-example', value='Textarea content initialized\\nwith multiple lines", "= html.Div([ dcc.Textarea( id='textarea-example', value='Textarea content initialized\\nwith multiple lines of", "'children'), [Input('textarea-example', 'value')] ) def update_output(value): return 'You have entered:", "dash.Dash(__name__) app.layout = html.Div([ dcc.Textarea( id='textarea-example', value='Textarea content initialized\\nwith multiple", "Output('textarea-example-output', 'children'), [Input('textarea-example', 'value')] ) def update_output(value): return 'You have", "dash from dash.dependencies import Input, Output import dash_html_components as html", "lines of text', style={'width': '100%', 'height': 300}, ), html.Div(id='textarea-example-output', style={'whiteSpace':", "import dash_core_components as dcc app = dash.Dash(__name__) app.layout = html.Div([", "dcc app = dash.Dash(__name__) app.layout = html.Div([ dcc.Textarea( id='textarea-example', value='Textarea", "style={'whiteSpace': 'pre-line'}) ]) @app.callback( Output('textarea-example-output', 'children'), [Input('textarea-example', 'value')] ) def", "html.Div([ dcc.Textarea( id='textarea-example', value='Textarea content initialized\\nwith multiple lines of text',", "import Input, Output import dash_html_components as html import dash_core_components as", "style={'width': '100%', 'height': 300}, ), html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'}) ]) @app.callback(", 
"multiple lines of text', style={'width': '100%', 'height': 300}, ), html.Div(id='textarea-example-output',", "[Input('textarea-example', 'value')] ) def update_output(value): return 'You have entered: \\n{}'.format(value)", "@app.callback( Output('textarea-example-output', 'children'), [Input('textarea-example', 'value')] ) def update_output(value): return 'You", "), html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'}) ]) @app.callback( Output('textarea-example-output', 'children'), [Input('textarea-example', 'value')]", "dash_html_components as html import dash_core_components as dcc app = dash.Dash(__name__)", "initialized\\nwith multiple lines of text', style={'width': '100%', 'height': 300}, ),", "dash_core_components as dcc app = dash.Dash(__name__) app.layout = html.Div([ dcc.Textarea(", "]) @app.callback( Output('textarea-example-output', 'children'), [Input('textarea-example', 'value')] ) def update_output(value): return", "import dash from dash.dependencies import Input, Output import dash_html_components as", ") def update_output(value): return 'You have entered: \\n{}'.format(value) if __name__", "as dcc app = dash.Dash(__name__) app.layout = html.Div([ dcc.Textarea( id='textarea-example',", "value='Textarea content initialized\\nwith multiple lines of text', style={'width': '100%', 'height':", "return 'You have entered: \\n{}'.format(value) if __name__ == '__main__': app.run_server(debug=True)", "content initialized\\nwith multiple lines of text', style={'width': '100%', 'height': 300},", "of text', style={'width': '100%', 'height': 300}, ), html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'})", "import dash_html_components as html import dash_core_components as dcc app =", "'value')] ) def update_output(value): return 'You have entered: \\n{}'.format(value) if", "def update_output(value): return 'You have entered: \\n{}'.format(value) if __name__ ==", "dash.dependencies import Input, Output import 
dash_html_components as html import dash_core_components", "Input, Output import dash_html_components as html import dash_core_components as dcc", "update_output(value): return 'You have entered: \\n{}'.format(value) if __name__ == '__main__':", "from dash.dependencies import Input, Output import dash_html_components as html import", "app = dash.Dash(__name__) app.layout = html.Div([ dcc.Textarea( id='textarea-example', value='Textarea content", "'pre-line'}) ]) @app.callback( Output('textarea-example-output', 'children'), [Input('textarea-example', 'value')] ) def update_output(value):", "Output import dash_html_components as html import dash_core_components as dcc app", "= dash.Dash(__name__) app.layout = html.Div([ dcc.Textarea( id='textarea-example', value='Textarea content initialized\\nwith", "html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'}) ]) @app.callback( Output('textarea-example-output', 'children'), [Input('textarea-example', 'value')] )", "300}, ), html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'}) ]) @app.callback( Output('textarea-example-output', 'children'), [Input('textarea-example',", "text', style={'width': '100%', 'height': 300}, ), html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'}) ])", "'height': 300}, ), html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'}) ]) @app.callback( Output('textarea-example-output', 'children'),", "as html import dash_core_components as dcc app = dash.Dash(__name__) app.layout" ]
[ "headless=True) def test_no_chromedriver_path(): \"\"\"Assert error is raised if no chromedriver", "is raised if no chromedriver path is used\"\"\" with pytest.raises(TypeError):", "is raised if no chromedriver path is used\"\"\" with pytest.raises(WebDriverException):", "wrapped_driver import WrappedDriver def test_empty_chromedriver_path(): \"\"\"Assert error is raised if", "test_empty_chromedriver_path(): \"\"\"Assert error is raised if no chromedriver path is", "from wrapped_driver import WrappedDriver def test_empty_chromedriver_path(): \"\"\"Assert error is raised", "with pytest.raises(WebDriverException): WrappedDriver(executable_path=\"\", headless=True) def test_no_chromedriver_path(): \"\"\"Assert error is raised", "import WrappedDriver def test_empty_chromedriver_path(): \"\"\"Assert error is raised if no", "def test_no_chromedriver_path(): \"\"\"Assert error is raised if no chromedriver path", "\"\"\"Assert error is raised if no chromedriver path is used\"\"\"", "raised if no chromedriver path is used\"\"\" with pytest.raises(WebDriverException): WrappedDriver(executable_path=\"\",", "is used\"\"\" with pytest.raises(WebDriverException): WrappedDriver(executable_path=\"\", headless=True) def test_no_chromedriver_path(): \"\"\"Assert error", "no chromedriver path is used\"\"\" with pytest.raises(WebDriverException): WrappedDriver(executable_path=\"\", headless=True) def", "from selenium.common.exceptions import WebDriverException from wrapped_driver import WrappedDriver def test_empty_chromedriver_path():", "selenium.common.exceptions import WebDriverException from wrapped_driver import WrappedDriver def test_empty_chromedriver_path(): \"\"\"Assert", "chromedriver path is used\"\"\" with pytest.raises(WebDriverException): WrappedDriver(executable_path=\"\", headless=True) def test_no_chromedriver_path():", "test_no_chromedriver_path(): \"\"\"Assert error is raised if no chromedriver path is", "import WebDriverException from wrapped_driver import 
WrappedDriver def test_empty_chromedriver_path(): \"\"\"Assert error", "pytest from selenium.common.exceptions import WebDriverException from wrapped_driver import WrappedDriver def", "path is used\"\"\" with pytest.raises(WebDriverException): WrappedDriver(executable_path=\"\", headless=True) def test_no_chromedriver_path(): \"\"\"Assert", "def test_empty_chromedriver_path(): \"\"\"Assert error is raised if no chromedriver path", "pytest.raises(WebDriverException): WrappedDriver(executable_path=\"\", headless=True) def test_no_chromedriver_path(): \"\"\"Assert error is raised if", "WebDriverException from wrapped_driver import WrappedDriver def test_empty_chromedriver_path(): \"\"\"Assert error is", "WrappedDriver def test_empty_chromedriver_path(): \"\"\"Assert error is raised if no chromedriver", "if no chromedriver path is used\"\"\" with pytest.raises(WebDriverException): WrappedDriver(executable_path=\"\", headless=True)", "WrappedDriver(executable_path=\"\", headless=True) def test_no_chromedriver_path(): \"\"\"Assert error is raised if no", "error is raised if no chromedriver path is used\"\"\" with", "raised if no chromedriver path is used\"\"\" with pytest.raises(TypeError): WrappedDriver(headless=True)", "used\"\"\" with pytest.raises(WebDriverException): WrappedDriver(executable_path=\"\", headless=True) def test_no_chromedriver_path(): \"\"\"Assert error is", "import pytest from selenium.common.exceptions import WebDriverException from wrapped_driver import WrappedDriver" ]
[ "from rlp.sedes import ( CountableList, ) from eth.rlp.headers import (", "import ( PetersburgTransaction, ) class PetersburgBlock(ByzantiumBlock): transaction_builder = PetersburgTransaction fields", "from eth.rlp.headers import ( BlockHeader, ) from eth.vm.forks.byzantium.blocks import (", "ByzantiumBlock, ) from .transactions import ( PetersburgTransaction, ) class PetersburgBlock(ByzantiumBlock):", "transaction_builder = PetersburgTransaction fields = [ ('header', BlockHeader), ('transactions', CountableList(transaction_builder)),", "from .transactions import ( PetersburgTransaction, ) class PetersburgBlock(ByzantiumBlock): transaction_builder =", "( CountableList, ) from eth.rlp.headers import ( BlockHeader, ) from", "BlockHeader, ) from eth.vm.forks.byzantium.blocks import ( ByzantiumBlock, ) from .transactions", "<reponame>ggs134/py-evm from rlp.sedes import ( CountableList, ) from eth.rlp.headers import", "rlp.sedes import ( CountableList, ) from eth.rlp.headers import ( BlockHeader,", ") from eth.rlp.headers import ( BlockHeader, ) from eth.vm.forks.byzantium.blocks import", "fields = [ ('header', BlockHeader), ('transactions', CountableList(transaction_builder)), ('uncles', CountableList(BlockHeader)) ]", "= PetersburgTransaction fields = [ ('header', BlockHeader), ('transactions', CountableList(transaction_builder)), ('uncles',", ") from eth.vm.forks.byzantium.blocks import ( ByzantiumBlock, ) from .transactions import", "class PetersburgBlock(ByzantiumBlock): transaction_builder = PetersburgTransaction fields = [ ('header', BlockHeader),", "PetersburgTransaction fields = [ ('header', BlockHeader), ('transactions', CountableList(transaction_builder)), ('uncles', CountableList(BlockHeader))", "import ( BlockHeader, ) from eth.vm.forks.byzantium.blocks import ( ByzantiumBlock, )", ".transactions import ( PetersburgTransaction, ) class PetersburgBlock(ByzantiumBlock): transaction_builder = PetersburgTransaction", "PetersburgTransaction, ) class 
PetersburgBlock(ByzantiumBlock): transaction_builder = PetersburgTransaction fields = [", "import ( ByzantiumBlock, ) from .transactions import ( PetersburgTransaction, )", "PetersburgBlock(ByzantiumBlock): transaction_builder = PetersburgTransaction fields = [ ('header', BlockHeader), ('transactions',", "import ( CountableList, ) from eth.rlp.headers import ( BlockHeader, )", "CountableList, ) from eth.rlp.headers import ( BlockHeader, ) from eth.vm.forks.byzantium.blocks", ") class PetersburgBlock(ByzantiumBlock): transaction_builder = PetersburgTransaction fields = [ ('header',", "from eth.vm.forks.byzantium.blocks import ( ByzantiumBlock, ) from .transactions import (", "eth.vm.forks.byzantium.blocks import ( ByzantiumBlock, ) from .transactions import ( PetersburgTransaction,", "( BlockHeader, ) from eth.vm.forks.byzantium.blocks import ( ByzantiumBlock, ) from", ") from .transactions import ( PetersburgTransaction, ) class PetersburgBlock(ByzantiumBlock): transaction_builder", "eth.rlp.headers import ( BlockHeader, ) from eth.vm.forks.byzantium.blocks import ( ByzantiumBlock,", "( PetersburgTransaction, ) class PetersburgBlock(ByzantiumBlock): transaction_builder = PetersburgTransaction fields =", "( ByzantiumBlock, ) from .transactions import ( PetersburgTransaction, ) class" ]
[ "in timings: if timings[r] >= thresh: print(f\"{r} ({timings[r]:.03}s)\") print(f\"test discovery", "if args.quick: os.environ[\"QUICKTEST\"] = \"True\" # Get all test names", "\"tests finished\") sys.exit(not test_result.wasSuccessful()) except KeyboardInterrupt: print_results(results, discovery_time, args.thresh, \"tests", "2.0 (the \"License\"); # you may not use this file", "%(default)d)\", ) parser.add_argument(\"-q\", \"--quick\", action=\"store_true\", dest=\"quick\", default=False, help=\"Only do quick", "help=\"Display tests longer than given threshold (default: %(default)d)\", ) parser.add_argument(", "keys to be unique\") results[name] = elapsed super().stopTest(test) def print_results(results,", "print(\"Remember to check above times for any errors!\") def parse_args(default_pattern):", "return print(f\"\\n\\n{status}, printing completed times >{thresh}s in ascending order...\\n\") timings", "({elapsed:.03}s)\\n\") if name in results: raise AssertionError(\"expected all keys to", "discovery_time, args.thresh, \"tests cancelled\") sys.exit(1) except Exception: print_results(results, discovery_time, args.thresh,", "can store the results.\"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)", "\"-f\", \"--failfast\", action=\"store_true\", dest=\"failfast\", default=False, help=\"Stop testing on first failure\"", "parser.add_argument(\"-q\", \"--quick\", action=\"store_true\", dest=\"quick\", default=False, help=\"Only do quick tests\") parser.add_argument(", "governing permissions and # limitations under the License. 
import argparse", "interruption try: test_result = test_runner.run(tests) print_results(results, discovery_time, args.thresh, \"tests finished\")", "discovery_time, args.thresh, \"tests finished\") sys.exit(not test_result.wasSuccessful()) except KeyboardInterrupt: print_results(results, discovery_time,", "# noqa: N802 \"\"\"Start timer, print test name, do normal", "raise AssertionError(\"expected all keys to be unique\") results[name] = elapsed", "testing on first failure\" ) args = parser.parse_args() print(f\"Running tests", "unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast ) # Use try catches to", "\"--failfast\", action=\"store_true\", dest=\"failfast\", default=False, help=\"Stop testing on first failure\" )", "default=False, help=\"Stop testing on first failure\" ) args = parser.parse_args()", "use this file except in compliance with the License. #", "a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless", "<reponame>crnbaker/MONAI # Copyright 2020 MONAI Consortium # Licensed under the", "failure\" ) args = parser.parse_args() print(f\"Running tests in folder: '{args.path}'\")", "parser.add_argument( \"-p\", action=\"store\", dest=\"pattern\", default=default_pattern, help=\"Pattern to match tests (default:", "argparse.ArgumentParser(description=\"Runner for MONAI unittests with timing.\") parser.add_argument( \"-s\", action=\"store\", dest=\"path\",", "N802 \"\"\"On test end, get time, print, store and do", "License. 
# You may obtain a copy of the License", "order...\\n\") timings = dict(sorted(results.items(), key=lambda item: item[1])) for r in", "file pattern: '{args.pattern}'\") return args def get_default_pattern(loader): signature = inspect.signature(loader.discover)", "test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast ) # Use try", "pattern: '{args.pattern}'\") return args def get_default_pattern(loader): signature = inspect.signature(loader.discover) params", "test: {name} ({elapsed:.03}s)\\n\") if name in results: raise AssertionError(\"expected all", "x[1] > thresh, results.items())) if len(results) == 0: return print(f\"\\n\\n{status},", "printing completed times >{thresh}s in ascending order...\\n\") timings = dict(sorted(results.items(),", "discovery_time = pc.total_time print(f\"time to discover tests: {discovery_time}s\") test_runner =", "under the License is distributed on an \"AS IS\" BASIS,", "first failure\" ) args = parser.parse_args() print(f\"Running tests in folder:", "License for the specific language governing permissions and # limitations", "print(f\"Running tests in folder: '{args.path}'\") if args.pattern: print(f\"With file pattern:", "\"\"\"Start timer, print test name, do normal test.\"\"\" self.start_time =", "\"__main__\": loader = unittest.TestLoader() default_pattern = get_default_pattern(loader) # Parse input", "parser.parse_args() print(f\"Running tests in folder: '{args.path}'\") if args.pattern: print(f\"With file", "except KeyboardInterrupt: print_results(results, discovery_time, args.thresh, \"tests cancelled\") sys.exit(1) except Exception:", "args def get_default_pattern(loader): signature = inspect.signature(loader.discover) params = {k: v.default", "time: {discovery_time:.03}s\") print(f\"total testing time: {sum(results.values()):.03}s\") print(\"Remember to check above", "def parse_args(default_pattern): parser = 
argparse.ArgumentParser(description=\"Runner for MONAI unittests with timing.\")", "at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or", "Parse input arguments args = parse_args(default_pattern) # If quick is", "*args, **kwargs): super().__init__(*args, **kwargs) self.timed_tests = dict() def startTest(self, test):", "tests\") parser.add_argument( \"-f\", \"--failfast\", action=\"store_true\", dest=\"failfast\", default=False, help=\"Stop testing on", ") parser.add_argument( \"-p\", action=\"store\", dest=\"pattern\", default=default_pattern, help=\"Pattern to match tests", "print_results(results, discovery_time, thresh, status): # only keep results >= threshold", "parser = argparse.ArgumentParser(description=\"Runner for MONAI unittests with timing.\") parser.add_argument( \"-s\",", "pc.total_time print(f\"time to discover tests: {discovery_time}s\") test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult,", "if name in results: raise AssertionError(\"expected all keys to be", "results so that we can store the results.\"\"\" def __init__(self,", "for MONAI unittests with timing.\") parser.add_argument( \"-s\", action=\"store\", dest=\"path\", default=\".\",", "in compliance with the License. 
# You may obtain a", "# only keep results >= threshold results = dict(filter(lambda x:", "software # distributed under the License is distributed on an", "import inspect import os import sys import time import unittest", "self.start_time = time.time() name = self.getDescription(test) self.stream.write(f\"Starting test: {name}...\\n\") super().startTest(test)", "v.default for k, v in signature.parameters.items() if v.default is not", "print(f\"time to discover tests: {discovery_time}s\") test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity,", "action=\"store\", dest=\"path\", default=\".\", help=\"Directory to start discovery (default: '%(default)s')\" )", "loader.discover(args.path, args.pattern) discovery_time = pc.total_time print(f\"time to discover tests: {discovery_time}s\")", "signature.parameters.items() if v.default is not inspect.Parameter.empty} return params[\"pattern\"] if __name__", "timings = dict(sorted(results.items(), key=lambda item: item[1])) for r in timings:", "import time import unittest from monai.utils import PerfContext results: dict", "\"\"\"Overload the default results so that we can store the", "License. 
import argparse import inspect import os import sys import", "{name} ({elapsed:.03}s)\\n\") if name in results: raise AssertionError(\"expected all keys", "# noqa: N802 \"\"\"On test end, get time, print, store", "any errors!\") def parse_args(default_pattern): parser = argparse.ArgumentParser(description=\"Runner for MONAI unittests", "results: dict = dict() class TimeLoggingTestResult(unittest.TextTestResult): \"\"\"Overload the default results", "time.time() - self.start_time name = self.getDescription(test) self.stream.write(f\"Finished test: {name} ({elapsed:.03}s)\\n\")", "Copyright 2020 MONAI Consortium # Licensed under the Apache License,", "\"--thresh\", dest=\"thresh\", default=10.0, type=float, help=\"Display tests longer than given threshold", "'{args.path}'\") if args.pattern: print(f\"With file pattern: '{args.pattern}'\") return args def", "to match tests (default: '%(default)s')\", ) parser.add_argument( \"-t\", \"--thresh\", dest=\"thresh\",", "super().stopTest(test) def print_results(results, discovery_time, thresh, status): # only keep results", "'%(default)s')\" ) parser.add_argument( \"-p\", action=\"store\", dest=\"pattern\", default=default_pattern, help=\"Pattern to match", "\"\"\"On test end, get time, print, store and do normal", "time.time() name = self.getDescription(test) self.stream.write(f\"Starting test: {name}...\\n\") super().startTest(test) def stopTest(self,", "only keep results >= threshold results = dict(filter(lambda x: x[1]", "in signature.parameters.items() if v.default is not inspect.Parameter.empty} return params[\"pattern\"] if", "Get all test names (optionally from some path with some", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "startTest(self, test): # noqa: N802 \"\"\"Start timer, print test name,", "the License. 
# You may obtain a copy of the", "for the specific language governing permissions and # limitations under", "= unittest.TestLoader() default_pattern = get_default_pattern(loader) # Parse input arguments args", "to in writing, software # distributed under the License is", "to print the current results if encountering exception or keyboard", "try: test_result = test_runner.run(tests) print_results(results, discovery_time, args.thresh, \"tests finished\") sys.exit(not", "# See the License for the specific language governing permissions", "sys.exit(not test_result.wasSuccessful()) except KeyboardInterrupt: print_results(results, discovery_time, args.thresh, \"tests cancelled\") sys.exit(1)", "inspect.Parameter.empty} return params[\"pattern\"] if __name__ == \"__main__\": loader = unittest.TestLoader()", "in folder: '{args.path}'\") if args.pattern: print(f\"With file pattern: '{args.pattern}'\") return", "= loader.discover(args.path, args.pattern) discovery_time = pc.total_time print(f\"time to discover tests:", "k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} return", "MONAI Consortium # Licensed under the Apache License, Version 2.0", "or agreed to in writing, software # distributed under the", "or keyboard interruption try: test_result = test_runner.run(tests) print_results(results, discovery_time, args.thresh,", "dest=\"pattern\", default=default_pattern, help=\"Pattern to match tests (default: '%(default)s')\", ) parser.add_argument(", "required by applicable law or agreed to in writing, software", "action=\"store_true\", dest=\"quick\", default=False, help=\"Only do quick tests\") parser.add_argument( \"-f\", \"--failfast\",", "given threshold (default: %(default)d)\", ) parser.add_argument( \"-v\", \"--verbosity\", action=\"store\", dest=\"verbosity\",", "\"--quick\", action=\"store_true\", dest=\"quick\", default=False, help=\"Only do quick tests\") parser.add_argument( \"-f\",", "dest=\"verbosity\", type=int, default=1, 
help=\"Verbosity level (default: %(default)d)\", ) parser.add_argument(\"-q\", \"--quick\",", "variable if args.quick: os.environ[\"QUICKTEST\"] = \"True\" # Get all test", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "from monai.utils import PerfContext results: dict = dict() class TimeLoggingTestResult(unittest.TextTestResult):", "with the License. # You may obtain a copy of", "default=default_pattern, help=\"Pattern to match tests (default: '%(default)s')\", ) parser.add_argument( \"-t\",", "permissions and # limitations under the License. import argparse import", "__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.timed_tests = dict() def startTest(self,", "set environment variable if args.quick: os.environ[\"QUICKTEST\"] = \"True\" # Get", "if timings[r] >= thresh: print(f\"{r} ({timings[r]:.03}s)\") print(f\"test discovery time: {discovery_time:.03}s\")", "be unique\") results[name] = elapsed super().stopTest(test) def print_results(results, discovery_time, thresh,", "compliance with the License. 
# You may obtain a copy", "agreed to in writing, software # distributed under the License", "discover tests: {discovery_time}s\") test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast )", "\"-s\", action=\"store\", dest=\"path\", default=\".\", help=\"Directory to start discovery (default: '%(default)s')\"", ") parser.add_argument( \"-v\", \"--verbosity\", action=\"store\", dest=\"verbosity\", type=int, default=1, help=\"Verbosity level", "environment variable if args.quick: os.environ[\"QUICKTEST\"] = \"True\" # Get all", "sys import time import unittest from monai.utils import PerfContext results:", "distributed under the License is distributed on an \"AS IS\"", "level (default: %(default)d)\", ) parser.add_argument(\"-q\", \"--quick\", action=\"store_true\", dest=\"quick\", default=False, help=\"Only", "{k: v.default for k, v in signature.parameters.items() if v.default is", "start discovery (default: '%(default)s')\" ) parser.add_argument( \"-p\", action=\"store\", dest=\"pattern\", default=default_pattern,", "key=lambda item: item[1])) for r in timings: if timings[r] >=", "= inspect.signature(loader.discover) params = {k: v.default for k, v in", "inspect.signature(loader.discover) params = {k: v.default for k, v in signature.parameters.items()", "names (optionally from some path with some pattern) with PerfContext()", "elapsed = time.time() - self.start_time name = self.getDescription(test) self.stream.write(f\"Finished test:", "dest=\"failfast\", default=False, help=\"Stop testing on first failure\" ) args =", "v in signature.parameters.items() if v.default is not inspect.Parameter.empty} return params[\"pattern\"]", "express or implied. # See the License for the specific", "except in compliance with the License. 
# You may obtain", "do quick tests\") parser.add_argument( \"-f\", \"--failfast\", action=\"store_true\", dest=\"failfast\", default=False, help=\"Stop", "self.getDescription(test) self.stream.write(f\"Starting test: {name}...\\n\") super().startTest(test) def stopTest(self, test): # noqa:", "'{args.pattern}'\") return args def get_default_pattern(loader): signature = inspect.signature(loader.discover) params =", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "# Parse input arguments args = parse_args(default_pattern) # If quick", "not use this file except in compliance with the License.", "dict = dict() class TimeLoggingTestResult(unittest.TextTestResult): \"\"\"Overload the default results so", "input arguments args = parse_args(default_pattern) # If quick is desired,", "os.environ[\"QUICKTEST\"] = \"True\" # Get all test names (optionally from", "writing, software # distributed under the License is distributed on", "(default: '%(default)s')\", ) parser.add_argument( \"-t\", \"--thresh\", dest=\"thresh\", default=10.0, type=float, help=\"Display", "you may not use this file except in compliance with", "def get_default_pattern(loader): signature = inspect.signature(loader.discover) params = {k: v.default for", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "import sys import time import unittest from monai.utils import PerfContext", "discovery time: {discovery_time:.03}s\") print(f\"total testing time: {sum(results.values()):.03}s\") print(\"Remember to check", "{discovery_time}s\") test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast ) # Use", "results.\"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.timed_tests = dict()", "thresh: print(f\"{r} ({timings[r]:.03}s)\") print(f\"test discovery time: {discovery_time:.03}s\") print(f\"total testing time:", "parse_args(default_pattern) # If quick is desired, set 
environment variable if", "args = parser.parse_args() print(f\"Running tests in folder: '{args.path}'\") if args.pattern:", "(default: %(default)d)\", ) parser.add_argument( \"-v\", \"--verbosity\", action=\"store\", dest=\"verbosity\", type=int, default=1,", "if encountering exception or keyboard interruption try: test_result = test_runner.run(tests)", "CONDITIONS OF ANY KIND, either express or implied. # See", "the results.\"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.timed_tests =", "discovery (default: '%(default)s')\" ) parser.add_argument( \"-p\", action=\"store\", dest=\"pattern\", default=default_pattern, help=\"Pattern", "print the current results if encountering exception or keyboard interruption", "desired, set environment variable if args.quick: os.environ[\"QUICKTEST\"] = \"True\" #", "test: {name}...\\n\") super().startTest(test) def stopTest(self, test): # noqa: N802 \"\"\"On", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "Consortium # Licensed under the Apache License, Version 2.0 (the", "dest=\"quick\", default=False, help=\"Only do quick tests\") parser.add_argument( \"-f\", \"--failfast\", action=\"store_true\",", "default results so that we can store the results.\"\"\" def", "\"tests cancelled\") sys.exit(1) except Exception: print_results(results, discovery_time, args.thresh, \"exception reached\")", "http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to", "parse_args(default_pattern): parser = argparse.ArgumentParser(description=\"Runner for MONAI unittests with timing.\") parser.add_argument(", "ascending order...\\n\") timings = dict(sorted(results.items(), key=lambda item: item[1])) for r", "test name, do normal test.\"\"\" self.start_time = time.time() name =", "= test_runner.run(tests) print_results(results, discovery_time, args.thresh, \"tests finished\") sys.exit(not test_result.wasSuccessful()) except", "\"True\" # Get all test names (optionally 
from some path", "print_results(results, discovery_time, args.thresh, \"tests finished\") sys.exit(not test_result.wasSuccessful()) except KeyboardInterrupt: print_results(results,", "time: {sum(results.values()):.03}s\") print(\"Remember to check above times for any errors!\")", "if v.default is not inspect.Parameter.empty} return params[\"pattern\"] if __name__ ==", "normal behaviour.\"\"\" elapsed = time.time() - self.start_time name = self.getDescription(test)", "import os import sys import time import unittest from monai.utils", "OR CONDITIONS OF ANY KIND, either express or implied. #", "import unittest from monai.utils import PerfContext results: dict = dict()", "the License is distributed on an \"AS IS\" BASIS, #", "thresh, results.items())) if len(results) == 0: return print(f\"\\n\\n{status}, printing completed", "\"-v\", \"--verbosity\", action=\"store\", dest=\"verbosity\", type=int, default=1, help=\"Verbosity level (default: %(default)d)\",", "test_result.wasSuccessful()) except KeyboardInterrupt: print_results(results, discovery_time, args.thresh, \"tests cancelled\") sys.exit(1) except", "time import unittest from monai.utils import PerfContext results: dict =", "**kwargs) self.timed_tests = dict() def startTest(self, test): # noqa: N802", ">= thresh: print(f\"{r} ({timings[r]:.03}s)\") print(f\"test discovery time: {discovery_time:.03}s\") print(f\"total testing", "elapsed super().stopTest(test) def print_results(results, discovery_time, thresh, status): # only keep", "current results if encountering exception or keyboard interruption try: test_result", "\"--verbosity\", action=\"store\", dest=\"verbosity\", type=int, default=1, help=\"Verbosity level (default: %(default)d)\", )", "= dict(filter(lambda x: x[1] > thresh, results.items())) if len(results) ==", "resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast ) # Use try catches to print", ") parser.add_argument( \"-t\", \"--thresh\", dest=\"thresh\", default=10.0, 
type=float, help=\"Display tests longer", "than given threshold (default: %(default)d)\", ) parser.add_argument( \"-v\", \"--verbosity\", action=\"store\",", "default=\".\", help=\"Directory to start discovery (default: '%(default)s')\" ) parser.add_argument( \"-p\",", "parser.add_argument( \"-v\", \"--verbosity\", action=\"store\", dest=\"verbosity\", type=int, default=1, help=\"Verbosity level (default:", "law or agreed to in writing, software # distributed under", "super().startTest(test) def stopTest(self, test): # noqa: N802 \"\"\"On test end,", "and # limitations under the License. import argparse import inspect", "store the results.\"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.timed_tests", "print(f\"test discovery time: {discovery_time:.03}s\") print(f\"total testing time: {sum(results.values()):.03}s\") print(\"Remember to", "License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law", "def stopTest(self, test): # noqa: N802 \"\"\"On test end, get", "to check above times for any errors!\") def parse_args(default_pattern): parser", "signature = inspect.signature(loader.discover) params = {k: v.default for k, v", "test_runner.run(tests) print_results(results, discovery_time, args.thresh, \"tests finished\") sys.exit(not test_result.wasSuccessful()) except KeyboardInterrupt:", "results = dict(filter(lambda x: x[1] > thresh, results.items())) if len(results)", "discovery_time, thresh, status): # only keep results >= threshold results", "self.timed_tests = dict() def startTest(self, test): # noqa: N802 \"\"\"Start", "import argparse import inspect import os import sys import time", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# Copyright 2020 MONAI Consortium # Licensed under the Apache", "timer, print test name, do normal test.\"\"\" self.start_time = time.time()", "unittest.TestLoader() default_pattern = get_default_pattern(loader) # Parse input arguments args =", "name, 
do normal test.\"\"\" self.start_time = time.time() name = self.getDescription(test)", "may not use this file except in compliance with the", "we can store the results.\"\"\" def __init__(self, *args, **kwargs): super().__init__(*args,", "\"-p\", action=\"store\", dest=\"pattern\", default=default_pattern, help=\"Pattern to match tests (default: '%(default)s')\",", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "{discovery_time:.03}s\") print(f\"total testing time: {sum(results.values()):.03}s\") print(\"Remember to check above times", "this file except in compliance with the License. # You", "results.items())) if len(results) == 0: return print(f\"\\n\\n{status}, printing completed times", "parser.add_argument( \"-t\", \"--thresh\", dest=\"thresh\", default=10.0, type=float, help=\"Display tests longer than", "args.thresh, \"tests finished\") sys.exit(not test_result.wasSuccessful()) except KeyboardInterrupt: print_results(results, discovery_time, args.thresh,", "in results: raise AssertionError(\"expected all keys to be unique\") results[name]", "finished\") sys.exit(not test_result.wasSuccessful()) except KeyboardInterrupt: print_results(results, discovery_time, args.thresh, \"tests cancelled\")", "with PerfContext() as pc: tests = loader.discover(args.path, args.pattern) discovery_time =", "(optionally from some path with some pattern) with PerfContext() as", "= dict() class TimeLoggingTestResult(unittest.TextTestResult): \"\"\"Overload the default results so that", "tests in folder: '{args.path}'\") if args.pattern: print(f\"With file pattern: '{args.pattern}'\")", "get_default_pattern(loader): signature = inspect.signature(loader.discover) params = {k: v.default for k,", "folder: '{args.path}'\") if args.pattern: print(f\"With file pattern: '{args.pattern}'\") return args", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "end, get time, print, store and do normal behaviour.\"\"\" elapsed", "help=\"Stop testing on first failure\" ) args = parser.parse_args() print(f\"Running", "super().__init__(*args, **kwargs) self.timed_tests = dict() def startTest(self, test): # noqa:", "test): # noqa: N802 \"\"\"On test end, get time, print,", "= parse_args(default_pattern) # If quick is desired, set environment variable", "parser.add_argument( \"-f\", \"--failfast\", action=\"store_true\", dest=\"failfast\", default=False, help=\"Stop testing on first", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.timed_tests = dict() def", "is not inspect.Parameter.empty} return params[\"pattern\"] if __name__ == \"__main__\": loader", "= get_default_pattern(loader) # Parse input arguments args = parse_args(default_pattern) #", "= elapsed super().stopTest(test) def print_results(results, discovery_time, thresh, status): # only", "default=1, help=\"Verbosity level (default: %(default)d)\", ) parser.add_argument(\"-q\", \"--quick\", action=\"store_true\", dest=\"quick\",", "TimeLoggingTestResult(unittest.TextTestResult): \"\"\"Overload the default results so that we can store", "**kwargs): super().__init__(*args, **kwargs) self.timed_tests = dict() def startTest(self, test): #", "test): # noqa: N802 \"\"\"Start timer, print test name, do", "testing time: {sum(results.values()):.03}s\") print(\"Remember to check above times for any", "# Use try catches to print the current results if", "under the License. 
import argparse import inspect import os import", "for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}", "help=\"Directory to start discovery (default: '%(default)s')\" ) parser.add_argument( \"-p\", action=\"store\",", "of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by", "self.start_time name = self.getDescription(test) self.stream.write(f\"Finished test: {name} ({elapsed:.03}s)\\n\") if name", "== 0: return print(f\"\\n\\n{status}, printing completed times >{thresh}s in ascending", ") args = parser.parse_args() print(f\"Running tests in folder: '{args.path}'\") if", "language governing permissions and # limitations under the License. import", "return params[\"pattern\"] if __name__ == \"__main__\": loader = unittest.TestLoader() default_pattern", "inspect import os import sys import time import unittest from", "do normal behaviour.\"\"\" elapsed = time.time() - self.start_time name =", "results[name] = elapsed super().stopTest(test) def print_results(results, discovery_time, thresh, status): #", "type=float, help=\"Display tests longer than given threshold (default: %(default)d)\", )", "default=10.0, type=float, help=\"Display tests longer than given threshold (default: %(default)d)\",", "print(f\"{r} ({timings[r]:.03}s)\") print(f\"test discovery time: {discovery_time:.03}s\") print(f\"total testing time: {sum(results.values()):.03}s\")", "tests longer than given threshold (default: %(default)d)\", ) parser.add_argument( \"-v\",", "or implied. # See the License for the specific language", "def startTest(self, test): # noqa: N802 \"\"\"Start timer, print test", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "help=\"Pattern to match tests (default: '%(default)s')\", ) parser.add_argument( \"-t\", \"--thresh\",", "some pattern) with PerfContext() as pc: tests = loader.discover(args.path, args.pattern)", "{name}...\\n\") super().startTest(test) def stopTest(self, test): # noqa: N802 \"\"\"On test", "v.default is not inspect.Parameter.empty} return params[\"pattern\"] if __name__ == \"__main__\":", "name = self.getDescription(test) self.stream.write(f\"Finished test: {name} ({elapsed:.03}s)\\n\") if name in", "if __name__ == \"__main__\": loader = unittest.TestLoader() default_pattern = get_default_pattern(loader)", "dict() class TimeLoggingTestResult(unittest.TextTestResult): \"\"\"Overload the default results so that we", "dict() def startTest(self, test): # noqa: N802 \"\"\"Start timer, print", "to discover tests: {discovery_time}s\") test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast", "\"-t\", \"--thresh\", dest=\"thresh\", default=10.0, type=float, help=\"Display tests longer than given", "as pc: tests = loader.discover(args.path, args.pattern) discovery_time = pc.total_time print(f\"time", "item[1])) for r in timings: if timings[r] >= thresh: print(f\"{r}", "import PerfContext results: dict = dict() class TimeLoggingTestResult(unittest.TextTestResult): \"\"\"Overload the", "print, store and do normal behaviour.\"\"\" elapsed = time.time() -", "'%(default)s')\", ) parser.add_argument( \"-t\", \"--thresh\", dest=\"thresh\", default=10.0, type=float, help=\"Display tests", "copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required", "if len(results) == 0: return print(f\"\\n\\n{status}, printing completed times >{thresh}s", ">{thresh}s in ascending order...\\n\") timings = dict(sorted(results.items(), key=lambda item: item[1]))", "default=False, help=\"Only do 
quick tests\") parser.add_argument( \"-f\", \"--failfast\", action=\"store_true\", dest=\"failfast\",", "(the \"License\"); # you may not use this file except", "unittests with timing.\") parser.add_argument( \"-s\", action=\"store\", dest=\"path\", default=\".\", help=\"Directory to", "that we can store the results.\"\"\" def __init__(self, *args, **kwargs):", "# you may not use this file except in compliance", "noqa: N802 \"\"\"Start timer, print test name, do normal test.\"\"\"", "results >= threshold results = dict(filter(lambda x: x[1] > thresh,", "exception or keyboard interruption try: test_result = test_runner.run(tests) print_results(results, discovery_time,", "print(f\"\\n\\n{status}, printing completed times >{thresh}s in ascending order...\\n\") timings =", "the current results if encountering exception or keyboard interruption try:", "get time, print, store and do normal behaviour.\"\"\" elapsed =", "on first failure\" ) args = parser.parse_args() print(f\"Running tests in", "timing.\") parser.add_argument( \"-s\", action=\"store\", dest=\"path\", default=\".\", help=\"Directory to start discovery", "behaviour.\"\"\" elapsed = time.time() - self.start_time name = self.getDescription(test) self.stream.write(f\"Finished", "- self.start_time name = self.getDescription(test) self.stream.write(f\"Finished test: {name} ({elapsed:.03}s)\\n\") if", "threshold results = dict(filter(lambda x: x[1] > thresh, results.items())) if", "for any errors!\") def parse_args(default_pattern): parser = argparse.ArgumentParser(description=\"Runner for MONAI", "= unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast ) # Use try catches", "0: return print(f\"\\n\\n{status}, printing completed times >{thresh}s in ascending order...\\n\")", "completed times >{thresh}s in ascending order...\\n\") timings = dict(sorted(results.items(), key=lambda", "Version 2.0 (the \"License\"); # you may not use this", "= 
self.getDescription(test) self.stream.write(f\"Finished test: {name} ({elapsed:.03}s)\\n\") if name in results:", "to be unique\") results[name] = elapsed super().stopTest(test) def print_results(results, discovery_time,", "quick tests\") parser.add_argument( \"-f\", \"--failfast\", action=\"store_true\", dest=\"failfast\", default=False, help=\"Stop testing", "try catches to print the current results if encountering exception", "stopTest(self, test): # noqa: N802 \"\"\"On test end, get time,", "may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0", "args.pattern) discovery_time = pc.total_time print(f\"time to discover tests: {discovery_time}s\") test_runner", "status): # only keep results >= threshold results = dict(filter(lambda", "if args.pattern: print(f\"With file pattern: '{args.pattern}'\") return args def get_default_pattern(loader):", "print test name, do normal test.\"\"\" self.start_time = time.time() name", "action=\"store\", dest=\"pattern\", default=default_pattern, help=\"Pattern to match tests (default: '%(default)s')\", )", "implied. 
# See the License for the specific language governing", "= argparse.ArgumentParser(description=\"Runner for MONAI unittests with timing.\") parser.add_argument( \"-s\", action=\"store\",", "dest=\"path\", default=\".\", help=\"Directory to start discovery (default: '%(default)s')\" ) parser.add_argument(", "under the Apache License, Version 2.0 (the \"License\"); # you", "x: x[1] > thresh, results.items())) if len(results) == 0: return", "MONAI unittests with timing.\") parser.add_argument( \"-s\", action=\"store\", dest=\"path\", default=\".\", help=\"Directory", "__name__ == \"__main__\": loader = unittest.TestLoader() default_pattern = get_default_pattern(loader) #", "action=\"store_true\", dest=\"failfast\", default=False, help=\"Stop testing on first failure\" ) args", "args.pattern: print(f\"With file pattern: '{args.pattern}'\") return args def get_default_pattern(loader): signature", "print_results(results, discovery_time, args.thresh, \"tests cancelled\") sys.exit(1) except Exception: print_results(results, discovery_time,", "thresh, status): # only keep results >= threshold results =", "# If quick is desired, set environment variable if args.quick:", "and do normal behaviour.\"\"\" elapsed = time.time() - self.start_time name", "for r in timings: if timings[r] >= thresh: print(f\"{r} ({timings[r]:.03}s)\")", "pattern) with PerfContext() as pc: tests = loader.discover(args.path, args.pattern) discovery_time", "by applicable law or agreed to in writing, software #", "args.thresh, \"tests cancelled\") sys.exit(1) except Exception: print_results(results, discovery_time, args.thresh, \"exception", "failfast=args.failfast ) # Use try catches to print the current", "KeyboardInterrupt: print_results(results, discovery_time, args.thresh, \"tests cancelled\") sys.exit(1) except Exception: print_results(results,", "the default results so that we can store the results.\"\"\"", "normal test.\"\"\" self.start_time = time.time() name = self.getDescription(test) 
self.stream.write(f\"Starting test:", "the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable", "path with some pattern) with PerfContext() as pc: tests =", "verbosity=args.verbosity, failfast=args.failfast ) # Use try catches to print the", "# http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed", "If quick is desired, set environment variable if args.quick: os.environ[\"QUICKTEST\"]", "dict(sorted(results.items(), key=lambda item: item[1])) for r in timings: if timings[r]", "errors!\") def parse_args(default_pattern): parser = argparse.ArgumentParser(description=\"Runner for MONAI unittests with", "store and do normal behaviour.\"\"\" elapsed = time.time() - self.start_time", "test.\"\"\" self.start_time = time.time() name = self.getDescription(test) self.stream.write(f\"Starting test: {name}...\\n\")", "unique\") results[name] = elapsed super().stopTest(test) def print_results(results, discovery_time, thresh, status):", "r in timings: if timings[r] >= thresh: print(f\"{r} ({timings[r]:.03}s)\") print(f\"test", "self.stream.write(f\"Starting test: {name}...\\n\") super().startTest(test) def stopTest(self, test): # noqa: N802", "= dict(sorted(results.items(), key=lambda item: item[1])) for r in timings: if", "threshold (default: %(default)d)\", ) parser.add_argument( \"-v\", \"--verbosity\", action=\"store\", dest=\"verbosity\", type=int,", "arguments args = parse_args(default_pattern) # If quick is desired, set", "in ascending order...\\n\") timings = dict(sorted(results.items(), key=lambda item: item[1])) for", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2020 MONAI Consortium # Licensed under the Apache License, Version", "above times for any errors!\") def parse_args(default_pattern): parser = argparse.ArgumentParser(description=\"Runner", "Unless required by applicable law or agreed to in writing,", "test names (optionally from some path with some pattern) with", 
"{sum(results.values()):.03}s\") print(\"Remember to check above times for any errors!\") def", "== \"__main__\": loader = unittest.TestLoader() default_pattern = get_default_pattern(loader) # Parse", "AssertionError(\"expected all keys to be unique\") results[name] = elapsed super().stopTest(test)", "= dict() def startTest(self, test): # noqa: N802 \"\"\"Start timer,", "so that we can store the results.\"\"\" def __init__(self, *args,", "({timings[r]:.03}s)\") print(f\"test discovery time: {discovery_time:.03}s\") print(f\"total testing time: {sum(results.values()):.03}s\") print(\"Remember", "test_result = test_runner.run(tests) print_results(results, discovery_time, args.thresh, \"tests finished\") sys.exit(not test_result.wasSuccessful())", ") # Use try catches to print the current results", "the specific language governing permissions and # limitations under the", "= \"True\" # Get all test names (optionally from some", "with some pattern) with PerfContext() as pc: tests = loader.discover(args.path,", "= self.getDescription(test) self.stream.write(f\"Starting test: {name}...\\n\") super().startTest(test) def stopTest(self, test): #", "applicable law or agreed to in writing, software # distributed", "= pc.total_time print(f\"time to discover tests: {discovery_time}s\") test_runner = unittest.runner.TextTestRunner(", "N802 \"\"\"Start timer, print test name, do normal test.\"\"\" self.start_time", "not inspect.Parameter.empty} return params[\"pattern\"] if __name__ == \"__main__\": loader =", "limitations under the License. 
import argparse import inspect import os", "action=\"store\", dest=\"verbosity\", type=int, default=1, help=\"Verbosity level (default: %(default)d)\", ) parser.add_argument(\"-q\",", "quick is desired, set environment variable if args.quick: os.environ[\"QUICKTEST\"] =", "time, print, store and do normal behaviour.\"\"\" elapsed = time.time()", "with timing.\") parser.add_argument( \"-s\", action=\"store\", dest=\"path\", default=\".\", help=\"Directory to start", "in writing, software # distributed under the License is distributed", "def print_results(results, discovery_time, thresh, status): # only keep results >=", "args.quick: os.environ[\"QUICKTEST\"] = \"True\" # Get all test names (optionally", "= time.time() name = self.getDescription(test) self.stream.write(f\"Starting test: {name}...\\n\") super().startTest(test) def", "noqa: N802 \"\"\"On test end, get time, print, store and", "# Get all test names (optionally from some path with", "all keys to be unique\") results[name] = elapsed super().stopTest(test) def", "all test names (optionally from some path with some pattern)", "longer than given threshold (default: %(default)d)\", ) parser.add_argument( \"-v\", \"--verbosity\",", "print(f\"With file pattern: '{args.pattern}'\") return args def get_default_pattern(loader): signature =", "catches to print the current results if encountering exception or", "the License. import argparse import inspect import os import sys", "do normal test.\"\"\" self.start_time = time.time() name = self.getDescription(test) self.stream.write(f\"Starting", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "type=int, default=1, help=\"Verbosity level (default: %(default)d)\", ) parser.add_argument(\"-q\", \"--quick\", action=\"store_true\",", "cancelled\") sys.exit(1) except Exception: print_results(results, discovery_time, args.thresh, \"exception reached\") raise", "License, Version 2.0 (the \"License\"); # you may not use", "# limitations under the License. 
import argparse import inspect import", "(default: '%(default)s')\" ) parser.add_argument( \"-p\", action=\"store\", dest=\"pattern\", default=default_pattern, help=\"Pattern to", "args = parse_args(default_pattern) # If quick is desired, set environment", "results if encountering exception or keyboard interruption try: test_result =", "# You may obtain a copy of the License at", "self.stream.write(f\"Finished test: {name} ({elapsed:.03}s)\\n\") if name in results: raise AssertionError(\"expected", "return args def get_default_pattern(loader): signature = inspect.signature(loader.discover) params = {k:", "default_pattern = get_default_pattern(loader) # Parse input arguments args = parse_args(default_pattern)", "parser.add_argument( \"-s\", action=\"store\", dest=\"path\", default=\".\", help=\"Directory to start discovery (default:", "self.getDescription(test) self.stream.write(f\"Finished test: {name} ({elapsed:.03}s)\\n\") if name in results: raise", "dict(filter(lambda x: x[1] > thresh, results.items())) if len(results) == 0:", "check above times for any errors!\") def parse_args(default_pattern): parser =", "help=\"Verbosity level (default: %(default)d)\", ) parser.add_argument(\"-q\", \"--quick\", action=\"store_true\", dest=\"quick\", default=False,", "timings[r] >= thresh: print(f\"{r} ({timings[r]:.03}s)\") print(f\"test discovery time: {discovery_time:.03}s\") print(f\"total", "timings: if timings[r] >= thresh: print(f\"{r} ({timings[r]:.03}s)\") print(f\"test discovery time:", "the License for the specific language governing permissions and #", "Apache License, Version 2.0 (the \"License\"); # you may not", "name = self.getDescription(test) self.stream.write(f\"Starting test: {name}...\\n\") super().startTest(test) def stopTest(self, test):", "times for any errors!\") def parse_args(default_pattern): parser = argparse.ArgumentParser(description=\"Runner for", "either express or implied. 
# See the License for the", "is desired, set environment variable if args.quick: os.environ[\"QUICKTEST\"] = \"True\"", "Use try catches to print the current results if encountering", "unittest from monai.utils import PerfContext results: dict = dict() class", "pc: tests = loader.discover(args.path, args.pattern) discovery_time = pc.total_time print(f\"time to", "obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #", "= time.time() - self.start_time name = self.getDescription(test) self.stream.write(f\"Finished test: {name}", "%(default)d)\", ) parser.add_argument( \"-v\", \"--verbosity\", action=\"store\", dest=\"verbosity\", type=int, default=1, help=\"Verbosity", "tests (default: '%(default)s')\", ) parser.add_argument( \"-t\", \"--thresh\", dest=\"thresh\", default=10.0, type=float,", "PerfContext() as pc: tests = loader.discover(args.path, args.pattern) discovery_time = pc.total_time", "= parser.parse_args() print(f\"Running tests in folder: '{args.path}'\") if args.pattern: print(f\"With", "> thresh, results.items())) if len(results) == 0: return print(f\"\\n\\n{status}, printing", "dest=\"thresh\", default=10.0, type=float, help=\"Display tests longer than given threshold (default:", "(default: %(default)d)\", ) parser.add_argument(\"-q\", \"--quick\", action=\"store_true\", dest=\"quick\", default=False, help=\"Only do", "keyboard interruption try: test_result = test_runner.run(tests) print_results(results, discovery_time, args.thresh, \"tests", "from some path with some pattern) with PerfContext() as pc:", "match tests (default: '%(default)s')\", ) parser.add_argument( \"-t\", \"--thresh\", dest=\"thresh\", default=10.0,", "name in results: raise AssertionError(\"expected all keys to be unique\")", "to start discovery (default: '%(default)s')\" ) parser.add_argument( \"-p\", action=\"store\", dest=\"pattern\",", "tests = loader.discover(args.path, args.pattern) discovery_time = pc.total_time print(f\"time to discover", "argparse 
import inspect import os import sys import time import", "os import sys import time import unittest from monai.utils import", "\"License\"); # you may not use this file except in", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "help=\"Only do quick tests\") parser.add_argument( \"-f\", \"--failfast\", action=\"store_true\", dest=\"failfast\", default=False,", "tests: {discovery_time}s\") test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast ) #", "get_default_pattern(loader) # Parse input arguments args = parse_args(default_pattern) # If", "params[\"pattern\"] if __name__ == \"__main__\": loader = unittest.TestLoader() default_pattern =", "print(f\"total testing time: {sum(results.values()):.03}s\") print(\"Remember to check above times for", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "loader = unittest.TestLoader() default_pattern = get_default_pattern(loader) # Parse input arguments", "test end, get time, print, store and do normal behaviour.\"\"\"", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "= {k: v.default for k, v in signature.parameters.items() if v.default", ") parser.add_argument(\"-q\", \"--quick\", action=\"store_true\", dest=\"quick\", default=False, help=\"Only do quick tests\")", "keep results >= threshold results = dict(filter(lambda x: x[1] >", "results: raise AssertionError(\"expected all keys to be unique\") results[name] =", "class TimeLoggingTestResult(unittest.TextTestResult): \"\"\"Overload the default results so that we can", "You may obtain a copy of the License at #", "PerfContext results: dict = dict() class TimeLoggingTestResult(unittest.TextTestResult): \"\"\"Overload the default", "some path with some pattern) with PerfContext() as pc: tests", "params = {k: v.default for k, v in signature.parameters.items() if", "encountering exception or keyboard 
interruption try: test_result = test_runner.run(tests) print_results(results,", "item: item[1])) for r in timings: if timings[r] >= thresh:", "the Apache License, Version 2.0 (the \"License\"); # you may", "times >{thresh}s in ascending order...\\n\") timings = dict(sorted(results.items(), key=lambda item:", "monai.utils import PerfContext results: dict = dict() class TimeLoggingTestResult(unittest.TextTestResult): \"\"\"Overload", ">= threshold results = dict(filter(lambda x: x[1] > thresh, results.items()))", "len(results) == 0: return print(f\"\\n\\n{status}, printing completed times >{thresh}s in" ]
[ "0 or values.size == 0: return new_values.copy() # make sure", "values.ndim if periods > 0: axis_indexer[axis] = slice(None, periods) else:", "periods) else: axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)] = fill_value #", "make sure array sent to np.roll is c_contiguous f_ordered =", "\"\"\" transforms.py is for shape-preserving functions. \"\"\" import numpy as", "axis=axis, ) axis_indexer = [slice(None)] * values.ndim if periods >", "axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)] = fill_value # restore original", "is for shape-preserving functions. \"\"\" import numpy as np def", "import numpy as np def shift(values: np.ndarray, periods: int, axis:", "else: axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)] = fill_value # restore", "to np.roll is c_contiguous f_ordered = values.flags.f_contiguous if f_ordered: new_values", "# make sure array sent to np.roll is c_contiguous f_ordered", "if periods == 0 or values.size == 0: return new_values.copy()", "== 0: return new_values.copy() # make sure array sent to", "new_values[tuple(axis_indexer)] = fill_value # restore original order if f_ordered: new_values", "c_contiguous f_ordered = values.flags.f_contiguous if f_ordered: new_values = new_values.T axis", "f_ordered: new_values = new_values.T axis = new_values.ndim - axis -", "new_values, np.intp(periods), axis=axis, ) axis_indexer = [slice(None)] * values.ndim if", "if f_ordered: new_values = new_values.T axis = new_values.ndim - axis", "restore original order if f_ordered: new_values = new_values.T return new_values", "periods: int, axis: int, fill_value) -> np.ndarray: new_values = values", "transforms.py is for shape-preserving functions. 
\"\"\" import numpy as np", "== 0 or values.size == 0: return new_values.copy() # make", "# restore original order if f_ordered: new_values = new_values.T return", "axis = new_values.ndim - axis - 1 if new_values.size: new_values", "new_values.copy() # make sure array sent to np.roll is c_contiguous", "values.flags.f_contiguous if f_ordered: new_values = new_values.T axis = new_values.ndim -", "0: axis_indexer[axis] = slice(None, periods) else: axis_indexer[axis] = slice(periods, None)", "axis_indexer = [slice(None)] * values.ndim if periods > 0: axis_indexer[axis]", "* values.ndim if periods > 0: axis_indexer[axis] = slice(None, periods)", "new_values = new_values.T axis = new_values.ndim - axis - 1", ") axis_indexer = [slice(None)] * values.ndim if periods > 0:", "1 if new_values.size: new_values = np.roll( new_values, np.intp(periods), axis=axis, )", "functions. \"\"\" import numpy as np def shift(values: np.ndarray, periods:", "values if periods == 0 or values.size == 0: return", "np.ndarray: new_values = values if periods == 0 or values.size", "slice(periods, None) new_values[tuple(axis_indexer)] = fill_value # restore original order if", "[slice(None)] * values.ndim if periods > 0: axis_indexer[axis] = slice(None,", "shift(values: np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray: new_values", "np.intp(periods), axis=axis, ) axis_indexer = [slice(None)] * values.ndim if periods", "= [slice(None)] * values.ndim if periods > 0: axis_indexer[axis] =", "= slice(None, periods) else: axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)] =", "f_ordered = values.flags.f_contiguous if f_ordered: new_values = new_values.T axis =", "def shift(values: np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray:", "0: return new_values.copy() # make sure array sent to np.roll", "\"\"\" import numpy as np def shift(values: np.ndarray, periods: int,", "> 0: axis_indexer[axis] = slice(None, periods) else: axis_indexer[axis] = 
slice(periods,", "slice(None, periods) else: axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)] = fill_value", "= new_values.T axis = new_values.ndim - axis - 1 if", "if periods > 0: axis_indexer[axis] = slice(None, periods) else: axis_indexer[axis]", "np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray: new_values =", "as np def shift(values: np.ndarray, periods: int, axis: int, fill_value)", "axis: int, fill_value) -> np.ndarray: new_values = values if periods", "= values.flags.f_contiguous if f_ordered: new_values = new_values.T axis = new_values.ndim", "= new_values.ndim - axis - 1 if new_values.size: new_values =", "if new_values.size: new_values = np.roll( new_values, np.intp(periods), axis=axis, ) axis_indexer", "np def shift(values: np.ndarray, periods: int, axis: int, fill_value) ->", "np.roll( new_values, np.intp(periods), axis=axis, ) axis_indexer = [slice(None)] * values.ndim", "or values.size == 0: return new_values.copy() # make sure array", "-> np.ndarray: new_values = values if periods == 0 or", "return new_values.copy() # make sure array sent to np.roll is", "- axis - 1 if new_values.size: new_values = np.roll( new_values,", "axis_indexer[axis] = slice(None, periods) else: axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)]", "shape-preserving functions. 
\"\"\" import numpy as np def shift(values: np.ndarray,", "sure array sent to np.roll is c_contiguous f_ordered = values.flags.f_contiguous", "int, fill_value) -> np.ndarray: new_values = values if periods ==", "- 1 if new_values.size: new_values = np.roll( new_values, np.intp(periods), axis=axis,", "new_values.T axis = new_values.ndim - axis - 1 if new_values.size:", "periods > 0: axis_indexer[axis] = slice(None, periods) else: axis_indexer[axis] =", "axis - 1 if new_values.size: new_values = np.roll( new_values, np.intp(periods),", "= fill_value # restore original order if f_ordered: new_values =", "= slice(periods, None) new_values[tuple(axis_indexer)] = fill_value # restore original order", "fill_value) -> np.ndarray: new_values = values if periods == 0", "new_values.ndim - axis - 1 if new_values.size: new_values = np.roll(", "np.roll is c_contiguous f_ordered = values.flags.f_contiguous if f_ordered: new_values =", "numpy as np def shift(values: np.ndarray, periods: int, axis: int,", "is c_contiguous f_ordered = values.flags.f_contiguous if f_ordered: new_values = new_values.T", "new_values.size: new_values = np.roll( new_values, np.intp(periods), axis=axis, ) axis_indexer =", "None) new_values[tuple(axis_indexer)] = fill_value # restore original order if f_ordered:", "new_values = np.roll( new_values, np.intp(periods), axis=axis, ) axis_indexer = [slice(None)]", "sent to np.roll is c_contiguous f_ordered = values.flags.f_contiguous if f_ordered:", "periods == 0 or values.size == 0: return new_values.copy() #", "values.size == 0: return new_values.copy() # make sure array sent", "= values if periods == 0 or values.size == 0:", "fill_value # restore original order if f_ordered: new_values = new_values.T", "array sent to np.roll is c_contiguous f_ordered = values.flags.f_contiguous if", "int, axis: int, fill_value) -> np.ndarray: new_values = values if", "for shape-preserving functions. 
\"\"\" import numpy as np def shift(values:", "new_values = values if periods == 0 or values.size ==", "= np.roll( new_values, np.intp(periods), axis=axis, ) axis_indexer = [slice(None)] *" ]
[ "__future__ import unicode_literals from django.db import models from django.utils.translation import", "title = models.CharField(max_length=256, blank=False, verbose_name=_('Name')) leader = models.OneToOneField( 'Student', verbose_name=_('Leader'),", "models from django.utils.translation import ugettext as _ class Group(models.Model): \"\"\"", "def __str__(self): if self.leader: return '{} ({} {})'.format( self.title, self.leader.first_name,", "self.leader: return '{} ({} {})'.format( self.title, self.leader.first_name, self.leader.last_name) else: return", "verbose_name_plural = _('Groups') def __str__(self): if self.leader: return '{} ({}", "return '{} ({} {})'.format( self.title, self.leader.first_name, self.leader.last_name) else: return '{}'.format(None)", "from __future__ import unicode_literals from django.db import models from django.utils.translation", "null=True, on_delete=models.SET_NULL) notes = models.TextField(blank=True, verbose_name=_('Additional notices')) class Meta(object): verbose_name", "unicode_literals from django.db import models from django.utils.translation import ugettext as", "Group model \"\"\" title = models.CharField(max_length=256, blank=False, verbose_name=_('Name')) leader =", "verbose_name=_('Leader'), blank=True, null=True, on_delete=models.SET_NULL) notes = models.TextField(blank=True, verbose_name=_('Additional notices')) class", "leader = models.OneToOneField( 'Student', verbose_name=_('Leader'), blank=True, null=True, on_delete=models.SET_NULL) notes =", "notes = models.TextField(blank=True, verbose_name=_('Additional notices')) class Meta(object): verbose_name = _('Group')", "= models.TextField(blank=True, verbose_name=_('Additional notices')) class Meta(object): verbose_name = _('Group') verbose_name_plural", "import models from django.utils.translation import ugettext as _ class Group(models.Model):", "<filename>students/models/group.py from __future__ import unicode_literals from django.db import models from", "model 
\"\"\" title = models.CharField(max_length=256, blank=False, verbose_name=_('Name')) leader = models.OneToOneField(", "_('Group') verbose_name_plural = _('Groups') def __str__(self): if self.leader: return '{}", "on_delete=models.SET_NULL) notes = models.TextField(blank=True, verbose_name=_('Additional notices')) class Meta(object): verbose_name =", "\"\"\" Group model \"\"\" title = models.CharField(max_length=256, blank=False, verbose_name=_('Name')) leader", "= models.CharField(max_length=256, blank=False, verbose_name=_('Name')) leader = models.OneToOneField( 'Student', verbose_name=_('Leader'), blank=True,", "= _('Groups') def __str__(self): if self.leader: return '{} ({} {})'.format(", "verbose_name=_('Additional notices')) class Meta(object): verbose_name = _('Group') verbose_name_plural = _('Groups')", "_('Groups') def __str__(self): if self.leader: return '{} ({} {})'.format( self.title,", "\"\"\" title = models.CharField(max_length=256, blank=False, verbose_name=_('Name')) leader = models.OneToOneField( 'Student',", "models.CharField(max_length=256, blank=False, verbose_name=_('Name')) leader = models.OneToOneField( 'Student', verbose_name=_('Leader'), blank=True, null=True,", "as _ class Group(models.Model): \"\"\" Group model \"\"\" title =", "models.OneToOneField( 'Student', verbose_name=_('Leader'), blank=True, null=True, on_delete=models.SET_NULL) notes = models.TextField(blank=True, verbose_name=_('Additional", "= _('Group') verbose_name_plural = _('Groups') def __str__(self): if self.leader: return", "django.db import models from django.utils.translation import ugettext as _ class", "class Meta(object): verbose_name = _('Group') verbose_name_plural = _('Groups') def __str__(self):", "if self.leader: return '{} ({} {})'.format( self.title, self.leader.first_name, self.leader.last_name) else:", "verbose_name=_('Name')) leader = models.OneToOneField( 'Student', verbose_name=_('Leader'), blank=True, null=True, on_delete=models.SET_NULL) notes", 
"blank=False, verbose_name=_('Name')) leader = models.OneToOneField( 'Student', verbose_name=_('Leader'), blank=True, null=True, on_delete=models.SET_NULL)", "Group(models.Model): \"\"\" Group model \"\"\" title = models.CharField(max_length=256, blank=False, verbose_name=_('Name'))", "django.utils.translation import ugettext as _ class Group(models.Model): \"\"\" Group model", "from django.db import models from django.utils.translation import ugettext as _", "from django.utils.translation import ugettext as _ class Group(models.Model): \"\"\" Group", "'Student', verbose_name=_('Leader'), blank=True, null=True, on_delete=models.SET_NULL) notes = models.TextField(blank=True, verbose_name=_('Additional notices'))", "notices')) class Meta(object): verbose_name = _('Group') verbose_name_plural = _('Groups') def", "verbose_name = _('Group') verbose_name_plural = _('Groups') def __str__(self): if self.leader:", "= models.OneToOneField( 'Student', verbose_name=_('Leader'), blank=True, null=True, on_delete=models.SET_NULL) notes = models.TextField(blank=True,", "_ class Group(models.Model): \"\"\" Group model \"\"\" title = models.CharField(max_length=256,", "blank=True, null=True, on_delete=models.SET_NULL) notes = models.TextField(blank=True, verbose_name=_('Additional notices')) class Meta(object):", "import unicode_literals from django.db import models from django.utils.translation import ugettext", "class Group(models.Model): \"\"\" Group model \"\"\" title = models.CharField(max_length=256, blank=False,", "import ugettext as _ class Group(models.Model): \"\"\" Group model \"\"\"", "Meta(object): verbose_name = _('Group') verbose_name_plural = _('Groups') def __str__(self): if", "ugettext as _ class Group(models.Model): \"\"\" Group model \"\"\" title", "models.TextField(blank=True, verbose_name=_('Additional notices')) class Meta(object): verbose_name = _('Group') verbose_name_plural =", "__str__(self): if self.leader: return '{} ({} {})'.format( self.title, 
self.leader.first_name, self.leader.last_name)" ]
[ "abc import ABCMeta, abstractmethod from frontegg.helpers.frontegg_urls import frontegg_urls import typing", "import logger from jwt import InvalidTokenError class IdentityClientMixin(metaclass=ABCMeta): __publicKey =", "', '') if verify: public_key = self.get_public_key() logger.debug('got public key'", "locally, will fetch public key') reties = 0 while reties", "logger.debug('found authorization header: ' + str(authorization_header)) jwt_token = authorization_header.replace('Bearer ',", "str(e)) logger.error('failed to get public key in all retries') def", "logger.error( 'could not get public key from frontegg, retry number", "typing.Optional[bool] = True): if not authorization_header: raise InvalidTokenError('Authorization headers is", "= self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config']) response.raise_for_status() data = response.json() return data.get('publicKey') def", "from frontegg, retry number - ' + str(reties) + ',", "+ str(reties) + ', ' + str(e)) logger.error('failed to get", "if not authorization_header: raise InvalidTokenError('Authorization headers is missing') logger.debug('found authorization", "as e: reties = reties + 1 logger.error( 'could not", "verify: public_key = self.get_public_key() logger.debug('got public key' + str(public_key)) decoded", "logger.error('failed to get public key in all retries') def fetch_public_key(self)", "not find public key locally, will fetch public key') reties", "' + str(reties) + ', ' + str(e)) logger.error('failed to", "authorization_header: raise InvalidTokenError('Authorization headers is missing') logger.debug('found authorization header: '", "reties = 0 while reties < 10: try: self.__publicKey =", "@abstractmethod def should_refresh_vendor_token(self) -> bool: pass @abstractmethod def refresh_vendor_token(self) ->", "get_public_key(self) -> str: if self.__publicKey: return self.__publicKey logger.info('could not find", "logger.info('could not find public key locally, will 
fetch public key')", "retry number - ' + str(reties) + ', ' +", "'could not get public key from frontegg, retry number -", "in all retries') def fetch_public_key(self) -> str: if self.should_refresh_vendor_token: self.refresh_vendor_token()", "public key') reties = 0 while reties < 10: try:", "logger from jwt import InvalidTokenError class IdentityClientMixin(metaclass=ABCMeta): __publicKey = None", "+ 1 logger.error( 'could not get public key from frontegg,", "', ' + str(e)) logger.error('failed to get public key in", "= jwt.decode(jwt_token, algorithms='RS256', verify=False) logger.info('jwt was decoded successfully') logger.debug('JWT value", "frontegg.helpers.logger import logger from jwt import InvalidTokenError class IdentityClientMixin(metaclass=ABCMeta): __publicKey", "return self.__publicKey logger.info('could not find public key locally, will fetch", "- ' + str(reties) + ', ' + str(e)) logger.error('failed", "import InvalidTokenError class IdentityClientMixin(metaclass=ABCMeta): __publicKey = None @property @abstractmethod def", "InvalidTokenError class IdentityClientMixin(metaclass=ABCMeta): __publicKey = None @property @abstractmethod def vendor_session_request(self)", "Exception as e: reties = reties + 1 logger.error( 'could", "10: try: self.__publicKey = self.fetch_public_key() return self.__publicKey except Exception as", "__publicKey = None @property @abstractmethod def vendor_session_request(self) -> requests.Session: pass", "1 logger.error( 'could not get public key from frontegg, retry", "= authorization_header.replace('Bearer ', '') if verify: public_key = self.get_public_key() logger.debug('got", "retries') def fetch_public_key(self) -> str: if self.should_refresh_vendor_token: self.refresh_vendor_token() response =", "import typing import jwt import requests from frontegg.helpers.logger import logger", "@abstractmethod def vendor_session_request(self) -> requests.Session: pass @property @abstractmethod def should_refresh_vendor_token(self)", 
"0 while reties < 10: try: self.__publicKey = self.fetch_public_key() return", "missing') logger.debug('found authorization header: ' + str(authorization_header)) jwt_token = authorization_header.replace('Bearer", "if self.should_refresh_vendor_token: self.refresh_vendor_token() response = self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config']) response.raise_for_status() data =", "get public key in all retries') def fetch_public_key(self) -> str:", "self.__publicKey = self.fetch_public_key() return self.__publicKey except Exception as e: reties", "key in all retries') def fetch_public_key(self) -> str: if self.should_refresh_vendor_token:", "response.json() return data.get('publicKey') def decode_jwt(self, authorization_header, verify: typing.Optional[bool] = True):", "key locally, will fetch public key') reties = 0 while", "abstractmethod from frontegg.helpers.frontegg_urls import frontegg_urls import typing import jwt import", "-> str: if self.__publicKey: return self.__publicKey logger.info('could not find public", "str(authorization_header)) jwt_token = authorization_header.replace('Bearer ', '') if verify: public_key =", "import requests from frontegg.helpers.logger import logger from jwt import InvalidTokenError", "self.get_public_key() logger.debug('got public key' + str(public_key)) decoded = jwt.decode(jwt_token, public_key,", "public_key = self.get_public_key() logger.debug('got public key' + str(public_key)) decoded =", "def get_public_key(self) -> str: if self.__publicKey: return self.__publicKey logger.info('could not", "from abc import ABCMeta, abstractmethod from frontegg.helpers.frontegg_urls import frontegg_urls import", "def decode_jwt(self, authorization_header, verify: typing.Optional[bool] = True): if not authorization_header:", "pass @property @abstractmethod def should_refresh_vendor_token(self) -> bool: pass @abstractmethod def", "decoded = jwt.decode(jwt_token, public_key, algorithms='RS256') else: decoded = 
jwt.decode(jwt_token, algorithms='RS256',", "str(reties) + ', ' + str(e)) logger.error('failed to get public", "typing import jwt import requests from frontegg.helpers.logger import logger from", "requests.Session: pass @property @abstractmethod def should_refresh_vendor_token(self) -> bool: pass @abstractmethod", "reties < 10: try: self.__publicKey = self.fetch_public_key() return self.__publicKey except", "= reties + 1 logger.error( 'could not get public key", "verify=False) logger.info('jwt was decoded successfully') logger.debug('JWT value - ' +", "to get public key in all retries') def fetch_public_key(self) ->", "all retries') def fetch_public_key(self) -> str: if self.should_refresh_vendor_token: self.refresh_vendor_token() response", "return data.get('publicKey') def decode_jwt(self, authorization_header, verify: typing.Optional[bool] = True): if", "while reties < 10: try: self.__publicKey = self.fetch_public_key() return self.__publicKey", "jwt.decode(jwt_token, algorithms='RS256', verify=False) logger.info('jwt was decoded successfully') logger.debug('JWT value -", "self.__publicKey logger.info('could not find public key locally, will fetch public", "def refresh_vendor_token(self) -> None: pass def get_public_key(self) -> str: if", "frontegg, retry number - ' + str(reties) + ', '", "True): if not authorization_header: raise InvalidTokenError('Authorization headers is missing') logger.debug('found", "authorization_header.replace('Bearer ', '') if verify: public_key = self.get_public_key() logger.debug('got public", "from frontegg.helpers.logger import logger from jwt import InvalidTokenError class IdentityClientMixin(metaclass=ABCMeta):", "= response.json() return data.get('publicKey') def decode_jwt(self, authorization_header, verify: typing.Optional[bool] =", "IdentityClientMixin(metaclass=ABCMeta): __publicKey = None @property @abstractmethod def vendor_session_request(self) -> requests.Session:", "def vendor_session_request(self) -> requests.Session: 
pass @property @abstractmethod def should_refresh_vendor_token(self) ->", "key') reties = 0 while reties < 10: try: self.__publicKey", "str: if self.__publicKey: return self.__publicKey logger.info('could not find public key", "authorization header: ' + str(authorization_header)) jwt_token = authorization_header.replace('Bearer ', '')", "reties = reties + 1 logger.error( 'could not get public", "@property @abstractmethod def should_refresh_vendor_token(self) -> bool: pass @abstractmethod def refresh_vendor_token(self)", "def should_refresh_vendor_token(self) -> bool: pass @abstractmethod def refresh_vendor_token(self) -> None:", "self.__publicKey except Exception as e: reties = reties + 1", "jwt import requests from frontegg.helpers.logger import logger from jwt import", "raise InvalidTokenError('Authorization headers is missing') logger.debug('found authorization header: ' +", "self.refresh_vendor_token() response = self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config']) response.raise_for_status() data = response.json() return", "InvalidTokenError('Authorization headers is missing') logger.debug('found authorization header: ' + str(authorization_header))", "None @property @abstractmethod def vendor_session_request(self) -> requests.Session: pass @property @abstractmethod", "= None @property @abstractmethod def vendor_session_request(self) -> requests.Session: pass @property", "self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config']) response.raise_for_status() data = response.json() return data.get('publicKey') def decode_jwt(self,", "bool: pass @abstractmethod def refresh_vendor_token(self) -> None: pass def get_public_key(self)", "-> requests.Session: pass @property @abstractmethod def should_refresh_vendor_token(self) -> bool: pass", "headers is missing') logger.debug('found authorization header: ' + str(authorization_header)) jwt_token", "public_key, algorithms='RS256') else: decoded = jwt.decode(jwt_token, 
algorithms='RS256', verify=False) logger.info('jwt was", "import frontegg_urls import typing import jwt import requests from frontegg.helpers.logger", "number - ' + str(reties) + ', ' + str(e))", "public key in all retries') def fetch_public_key(self) -> str: if", "key' + str(public_key)) decoded = jwt.decode(jwt_token, public_key, algorithms='RS256') else: decoded", "+ str(public_key)) decoded = jwt.decode(jwt_token, public_key, algorithms='RS256') else: decoded =", "decoded successfully') logger.debug('JWT value - ' + str(decoded)) return decoded", "@property @abstractmethod def vendor_session_request(self) -> requests.Session: pass @property @abstractmethod def", "+ ', ' + str(e)) logger.error('failed to get public key", "pass def get_public_key(self) -> str: if self.__publicKey: return self.__publicKey logger.info('could", "self.__publicKey: return self.__publicKey logger.info('could not find public key locally, will", "except Exception as e: reties = reties + 1 logger.error(", "header: ' + str(authorization_header)) jwt_token = authorization_header.replace('Bearer ', '') if", "not get public key from frontegg, retry number - '", "requests from frontegg.helpers.logger import logger from jwt import InvalidTokenError class", "find public key locally, will fetch public key') reties =", "-> bool: pass @abstractmethod def refresh_vendor_token(self) -> None: pass def", "verify: typing.Optional[bool] = True): if not authorization_header: raise InvalidTokenError('Authorization headers", "refresh_vendor_token(self) -> None: pass def get_public_key(self) -> str: if self.__publicKey:", "self.fetch_public_key() return self.__publicKey except Exception as e: reties = reties", "from frontegg.helpers.frontegg_urls import frontegg_urls import typing import jwt import requests", "data = response.json() return data.get('publicKey') def decode_jwt(self, authorization_header, verify: typing.Optional[bool]", "pass @abstractmethod def refresh_vendor_token(self) -> None: pass def 
get_public_key(self) ->", "response = self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config']) response.raise_for_status() data = response.json() return data.get('publicKey')", "get public key from frontegg, retry number - ' +", "@abstractmethod def refresh_vendor_token(self) -> None: pass def get_public_key(self) -> str:", "= self.get_public_key() logger.debug('got public key' + str(public_key)) decoded = jwt.decode(jwt_token,", "str: if self.should_refresh_vendor_token: self.refresh_vendor_token() response = self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config']) response.raise_for_status() data", "= 0 while reties < 10: try: self.__publicKey = self.fetch_public_key()", "+ str(authorization_header)) jwt_token = authorization_header.replace('Bearer ', '') if verify: public_key", "jwt_token = authorization_header.replace('Bearer ', '') if verify: public_key = self.get_public_key()", "jwt import InvalidTokenError class IdentityClientMixin(metaclass=ABCMeta): __publicKey = None @property @abstractmethod", "-> str: if self.should_refresh_vendor_token: self.refresh_vendor_token() response = self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config']) response.raise_for_status()", "frontegg_urls import typing import jwt import requests from frontegg.helpers.logger import", "if self.__publicKey: return self.__publicKey logger.info('could not find public key locally,", "try: self.__publicKey = self.fetch_public_key() return self.__publicKey except Exception as e:", "response.raise_for_status() data = response.json() return data.get('publicKey') def decode_jwt(self, authorization_header, verify:", "authorization_header, verify: typing.Optional[bool] = True): if not authorization_header: raise InvalidTokenError('Authorization", "vendor_session_request(self) -> requests.Session: pass @property @abstractmethod def should_refresh_vendor_token(self) -> bool:", "-> None: pass def get_public_key(self) -> str: 
if self.__publicKey: return", "will fetch public key') reties = 0 while reties <", "import jwt import requests from frontegg.helpers.logger import logger from jwt", "should_refresh_vendor_token(self) -> bool: pass @abstractmethod def refresh_vendor_token(self) -> None: pass", "algorithms='RS256', verify=False) logger.info('jwt was decoded successfully') logger.debug('JWT value - '", "logger.debug('got public key' + str(public_key)) decoded = jwt.decode(jwt_token, public_key, algorithms='RS256')", "class IdentityClientMixin(metaclass=ABCMeta): __publicKey = None @property @abstractmethod def vendor_session_request(self) ->", "key from frontegg, retry number - ' + str(reties) +", "decode_jwt(self, authorization_header, verify: typing.Optional[bool] = True): if not authorization_header: raise", "else: decoded = jwt.decode(jwt_token, algorithms='RS256', verify=False) logger.info('jwt was decoded successfully')", "if verify: public_key = self.get_public_key() logger.debug('got public key' + str(public_key))", "frontegg_urls.identity_service['vendor_config']) response.raise_for_status() data = response.json() return data.get('publicKey') def decode_jwt(self, authorization_header,", "data.get('publicKey') def decode_jwt(self, authorization_header, verify: typing.Optional[bool] = True): if not", "' + str(authorization_header)) jwt_token = authorization_header.replace('Bearer ', '') if verify:", "reties + 1 logger.error( 'could not get public key from", "def fetch_public_key(self) -> str: if self.should_refresh_vendor_token: self.refresh_vendor_token() response = self.vendor_session_request.get(", "= True): if not authorization_header: raise InvalidTokenError('Authorization headers is missing')", "import ABCMeta, abstractmethod from frontegg.helpers.frontegg_urls import frontegg_urls import typing import", "+ str(e)) logger.error('failed to get public key in all retries')", "not authorization_header: raise InvalidTokenError('Authorization headers is missing') 
logger.debug('found authorization header:", "e: reties = reties + 1 logger.error( 'could not get", "fetch_public_key(self) -> str: if self.should_refresh_vendor_token: self.refresh_vendor_token() response = self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config'])", "' + str(e)) logger.error('failed to get public key in all", "decoded = jwt.decode(jwt_token, algorithms='RS256', verify=False) logger.info('jwt was decoded successfully') logger.debug('JWT", "from jwt import InvalidTokenError class IdentityClientMixin(metaclass=ABCMeta): __publicKey = None @property", "was decoded successfully') logger.debug('JWT value - ' + str(decoded)) return", "ABCMeta, abstractmethod from frontegg.helpers.frontegg_urls import frontegg_urls import typing import jwt", "self.should_refresh_vendor_token: self.refresh_vendor_token() response = self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config']) response.raise_for_status() data = response.json()", "= self.fetch_public_key() return self.__publicKey except Exception as e: reties =", "None: pass def get_public_key(self) -> str: if self.__publicKey: return self.__publicKey", "fetch public key') reties = 0 while reties < 10:", "jwt.decode(jwt_token, public_key, algorithms='RS256') else: decoded = jwt.decode(jwt_token, algorithms='RS256', verify=False) logger.info('jwt", "is missing') logger.debug('found authorization header: ' + str(authorization_header)) jwt_token =", "return self.__publicKey except Exception as e: reties = reties +", "< 10: try: self.__publicKey = self.fetch_public_key() return self.__publicKey except Exception", "logger.info('jwt was decoded successfully') logger.debug('JWT value - ' + str(decoded))", "public key from frontegg, retry number - ' + str(reties)", "public key' + str(public_key)) decoded = jwt.decode(jwt_token, public_key, algorithms='RS256') else:", "'') if verify: public_key = self.get_public_key() logger.debug('got public key' +", "algorithms='RS256') 
else: decoded = jwt.decode(jwt_token, algorithms='RS256', verify=False) logger.info('jwt was decoded", "str(public_key)) decoded = jwt.decode(jwt_token, public_key, algorithms='RS256') else: decoded = jwt.decode(jwt_token,", "= jwt.decode(jwt_token, public_key, algorithms='RS256') else: decoded = jwt.decode(jwt_token, algorithms='RS256', verify=False)", "public key locally, will fetch public key') reties = 0", "frontegg.helpers.frontegg_urls import frontegg_urls import typing import jwt import requests from" ]
[ "def __init__(self, base_client): super().__init__(base_client) def create_action(self, action: Action, query_params: Dict[str,", "Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response,", "of one or two webhook keys. The first key is", "= Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return", "object] = None) -> ActionResult: \"\"\" Returns the status of", "= None) -> Action: \"\"\" Returns a specific action template.", "object] = None) -> List[Action]: \"\"\" Returns the list of", "from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel from splunk_sdk.action.v1beta2.gen_models import Action from", "specific action template. \"\"\" if query_params is None: query_params =", "email action. The status is available for 4 days after", "action_name, \"status_id\": status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params) url = self.base_client.build_url(path)", "OpenAPI spec version: v1beta2.12 (recommended default) Generated by: https://openapi-generator.tech \"\"\"", "= self.base_client.get(url, params=query_params) return handle_response(response, ActionResult) def get_action_status_details(self, action_name: str,", "of the invoked email action. The status is available for", "auto-generated. Do not edit! 
############# \"\"\" SDC Service: Action Service", "query_params = {} path_params = { } path = Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params)", "= self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, PublicWebhookKey) def", "base_client): super().__init__(base_client) def create_action(self, action: Action, query_params: Dict[str, object] =", "specific language governing permissions and limitations # under the License.", "\"\"\" Returns the status details of the invoked email action.", "# not use this file except in compliance with the", "import Action from splunk_sdk.action.v1beta2.gen_models import ActionMutable from splunk_sdk.action.v1beta2.gen_models import ActionResult", "Licensed under the Apache License, Version 2.0 (the \"License\"): you", "def get_action_status(self, action_name: str, status_id: str, query_params: Dict[str, object] =", "in Splunk Cloud Services, you can receive incoming trigger events", "splunk_sdk.action.v1beta2.gen_models import ActionMutable from splunk_sdk.action.v1beta2.gen_models import ActionResult from splunk_sdk.action.v1beta2.gen_models import", "= {} path_params = { } path = Template(\"/action/v1beta2/actions\").substitute(path_params) url", "import BaseService from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel from splunk_sdk.action.v1beta2.gen_models import", "in compliance with the License. You may obtain # a", "Copyright © 2021 Splunk, Inc. # # Licensed under the", "return handle_response(response, Action) def get_action_status(self, action_name: str, status_id: str, query_params:", "SDC Service: Action Service With the Action service in Splunk", "self.base_client.get(url, params=query_params) return handle_response(response, ActionResult) def get_action_status_details(self, action_name: str, status_id:", "action templates to turn these events into meaningful actions. 
\"\"\"", "= None) -> List[PublicWebhookKey]: \"\"\" Returns an array of one", "= self.base_client.post(url, json=data, params=query_params) return handle_response(response, ) def update_action(self, action_name:", "Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data = trigger_event.to_dict() response = self.base_client.post(url,", "You may obtain # a copy of the License at", "\"\"\" Invokes an action. \"\"\" if query_params is None: query_params", "\"action_name\": action_name, \"status_id\": status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params) url =", "meaningful actions. OpenAPI spec version: v1beta2.12 (recommended default) Generated by:", "ActionResultEmailDetail from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey from splunk_sdk.action.v1beta2.gen_models import ServiceError from", "action templates to turn these events into meaningful actions. OpenAPI", "# coding: utf-8 # Copyright © 2021 Splunk, Inc. #", "Action: \"\"\" Modifies an action template. \"\"\" if query_params is", "handle_response(response, PublicWebhookKey) def list_actions(self, query_params: Dict[str, object] = None) ->", "= { \"action_name\": action_name, } path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url =", "for 4 days after the last status change. \"\"\" if", "first key is active. The second key, if present, is", "object] = None) -> SSCVoidModel: \"\"\" Invokes an action. \"\"\"", "} path = Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url,", "Invokes an action. \"\"\" if query_params is None: query_params =", "under the License is distributed on an \"AS IS\" BASIS,", "available for 4 days after the last status change. 
\"\"\"", "a copy of the License at # # [http://www.apache.org/licenses/LICENSE-2.0] #", "limitations # under the License. ############# This file is auto-generated.", "Action) def delete_action(self, action_name: str, query_params: Dict[str, object] = None)", "expired. \"\"\" if query_params is None: query_params = {} path_params", "self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, Action) def get_action_status(self,", "\"\"\" if query_params is None: query_params = {} path_params =", "return handle_response(response, Action) def delete_action(self, action_name: str, query_params: Dict[str, object]", "object] = None) -> Action: \"\"\" Returns a specific action", "version: v1beta2.12 (recommended default) Generated by: https://openapi-generator.tech \"\"\" from requests", "\"action_name\": action_name, \"status_id\": status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params) url =", "this file except in compliance with the License. 
You may", "None: query_params = {} path_params = { } path =", "response = self.base_client.post(url, json=data, params=query_params) return handle_response(response, Action) def delete_action(self,", "self.base_client.build_url(path) data = trigger_event.to_dict() response = self.base_client.post(url, json=data, params=query_params) return", "str, status_id: str, query_params: Dict[str, object] = None) -> ActionResult:", "under the Apache License, Version 2.0 (the \"License\"): you may", "from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey from splunk_sdk.action.v1beta2.gen_models", "\"action_name\": action_name, } path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data", "self.base_client.build_url(path) data = action_mutable.to_dict() response = self.base_client.patch(url, json=data, params=query_params) return", "software # distributed under the License is distributed on an", "permissions and limitations # under the License. ############# This file", "\"status_id\": status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params) url = self.base_client.build_url(path) response", "status change. 
\"\"\" if query_params is None: query_params = {}", "requests import Response from string import Template from typing import", "= Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.delete(url, params=query_params) return", "of the License at # # [http://www.apache.org/licenses/LICENSE-2.0] # # Unless", "Response from string import Template from typing import List, Dict", "{} path_params = { \"action_name\": action_name, } path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params)", "import PublicWebhookKey from splunk_sdk.action.v1beta2.gen_models import ServiceError from splunk_sdk.action.v1beta2.gen_models import TriggerEvent", "Dict[str, object] = None) -> Action: \"\"\" Returns a specific", "\"\"\" Returns the list of action templates. \"\"\" if query_params", "json=data, params=query_params) return handle_response(response, Action) def delete_action(self, action_name: str, query_params:", "self.base_client.get(url, params=query_params) return handle_response(response, PublicWebhookKey) def list_actions(self, query_params: Dict[str, object]", "file except in compliance with the License. You may obtain", "None: query_params = {} path_params = { \"action_name\": action_name, }", "ServiceError from splunk_sdk.action.v1beta2.gen_models import TriggerEvent class ActionService(BaseService): \"\"\" Action Service", "is None: query_params = {} path_params = { } path", "None) -> SSCVoidModel: \"\"\" Removes an action template. \"\"\" if", "file is auto-generated. Do not edit! ############# \"\"\" SDC Service:", "splunk_sdk.action.v1beta2.gen_models import ActionResult from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail from splunk_sdk.action.v1beta2.gen_models import", "governing permissions and limitations # under the License. ############# This", "or two webhook keys. The first key is active. 
The", "spec version: v1beta2.12 (recommended default) Generated by: https://openapi-generator.tech \"\"\" from", "OR CONDITIONS OF ANY KIND, either express or implied. See", "the specific language governing permissions and limitations # under the", "(recommended default) Generated by: https://openapi-generator.tech \"\"\" from requests import Response", "a specific action template. \"\"\" if query_params is None: query_params", "service in Splunk Cloud Services, you can receive incoming trigger", "was invoked. The status is available for 4 days after", "License at # # [http://www.apache.org/licenses/LICENSE-2.0] # # Unless required by", "templates. \"\"\" if query_params is None: query_params = {} path_params", "that was invoked. The status is available for 4 days", "to turn these events into meaningful actions. OpenAPI spec version:", "\"\"\" Modifies an action template. \"\"\" if query_params is None:", "query_params: Dict[str, object] = None) -> List[PublicWebhookKey]: \"\"\" Returns an", "the Apache License, Version 2.0 (the \"License\"): you may #", "With the Action service in Splunk Cloud Services, you can", "response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResult) def get_action_status_details(self, action_name:", "under the License. ############# This file is auto-generated. Do not", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "splunk_sdk.action.v1beta2.gen_models import TriggerEvent class ActionService(BaseService): \"\"\" Action Service Version: v1beta2.12", "list of action templates. \"\"\" if query_params is None: query_params", "= None) -> SSCVoidModel: \"\"\" Invokes an action. 
\"\"\" if", "Dict[str, object] = None) -> Action: \"\"\" Creates an action", "Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) data = action.to_dict() response = self.base_client.post(url,", "coding: utf-8 # Copyright © 2021 Splunk, Inc. # #", "language governing permissions and limitations # under the License. #############", "List, Dict from splunk_sdk.base_client import handle_response from splunk_sdk.base_service import BaseService", "Dict[str, object] = None) -> Action: \"\"\" Modifies an action", "to in writing, software # distributed under the License is", "action. \"\"\" if query_params is None: query_params = {} path_params", "def trigger_action(self, action_name: str, trigger_event: TriggerEvent, query_params: Dict[str, object] =", "path_params = { \"action_name\": action_name, } path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url", "use pre-defined action templates to turn these events into meaningful", "object] = None) -> SSCVoidModel: \"\"\" Removes an action template.", "array of one or two webhook keys. The first key", "action.to_dict() response = self.base_client.post(url, json=data, params=query_params) return handle_response(response, Action) def", "= Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return", "The first key is active. The second key, if present,", "params=query_params) return handle_response(response, ActionResult) def get_action_status_details(self, action_name: str, status_id: str,", "Apache License, Version 2.0 (the \"License\"): you may # not", "or agreed to in writing, software # distributed under the", "invoked email action. The status is available for 4 days", "edit! 
############# \"\"\" SDC Service: Action Service With the Action", "= self.base_client.get(url, params=query_params) return handle_response(response, PublicWebhookKey) def list_actions(self, query_params: Dict[str,", "required by applicable law or agreed to in writing, software", "############# This file is auto-generated. Do not edit! ############# \"\"\"", "url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, Action)", "query_params: Dict[str, object] = None) -> ActionResult: \"\"\" Returns the", "of an action that was invoked. The status is available", "return handle_response(response, ActionResultEmailDetail) def get_public_webhook_keys(self, query_params: Dict[str, object] = None)", "None) -> SSCVoidModel: \"\"\" Invokes an action. \"\"\" if query_params", "2.0 (the \"License\"): you may # not use this file", "= {} path_params = { \"action_name\": action_name, } path =", "is expired. \"\"\" if query_params is None: query_params = {}", "agreed to in writing, software # distributed under the License", "import ActionMutable from splunk_sdk.action.v1beta2.gen_models import ActionResult from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail", "self.base_client.post(url, json=data, params=query_params) return handle_response(response, ) def update_action(self, action_name: str,", "data = action_mutable.to_dict() response = self.base_client.patch(url, json=data, params=query_params) return handle_response(response,", "turn these events into meaningful actions. \"\"\" def __init__(self, base_client):", "str, query_params: Dict[str, object] = None) -> SSCVoidModel: \"\"\" Removes", "of action templates. \"\"\" if query_params is None: query_params =", "distributed under the License is distributed on an \"AS IS\"", "None) -> List[Action]: \"\"\" Returns the list of action templates.", "CONDITIONS OF ANY KIND, either express or implied. 
See the", "trigger events and use pre-defined action templates to turn these", "status is available for 4 days after the last status", "action_name: str, trigger_event: TriggerEvent, query_params: Dict[str, object] = None) ->", "object] = None) -> Action: \"\"\" Modifies an action template.", "= action_mutable.to_dict() response = self.base_client.patch(url, json=data, params=query_params) return handle_response(response, Action)", "{} path_params = { } path = Template(\"/action/v1beta2/actions\").substitute(path_params) url =", "handle_response(response, Action) def get_action_status(self, action_name: str, status_id: str, query_params: Dict[str,", "not use this file except in compliance with the License.", "= self.base_client.post(url, json=data, params=query_params) return handle_response(response, Action) def delete_action(self, action_name:", "query_params: Dict[str, object] = None) -> Action: \"\"\" Modifies an", "None) -> Action: \"\"\" Modifies an action template. \"\"\" if", "BaseService from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel from splunk_sdk.action.v1beta2.gen_models import Action", "} path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.delete(url,", "def get_public_webhook_keys(self, query_params: Dict[str, object] = None) -> List[PublicWebhookKey]: \"\"\"", "writing, software # distributed under the License is distributed on", "= {} path_params = { \"action_name\": action_name, \"status_id\": status_id, }", "List[ActionResultEmailDetail]: \"\"\" Returns the status details of the invoked email", "action_name: str, action_mutable: ActionMutable, query_params: Dict[str, object] = None) ->", "self.base_client.build_url(path) response = self.base_client.delete(url, params=query_params) return handle_response(response, ) def get_action(self,", "response = self.base_client.get(url, params=query_params) return handle_response(response, 
Action) def get_action_status(self, action_name:", "PublicWebhookKey) def list_actions(self, query_params: Dict[str, object] = None) -> List[Action]:", "action_name: str, status_id: str, query_params: Dict[str, object] = None) ->", "and limitations # under the License. ############# This file is", "Action: \"\"\" Creates an action template. \"\"\" if query_params is", "str, query_params: Dict[str, object] = None) -> ActionResult: \"\"\" Returns", "the License. You may obtain # a copy of the", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "Template from typing import List, Dict from splunk_sdk.base_client import handle_response", "use this file except in compliance with the License. You", "get_action_status(self, action_name: str, status_id: str, query_params: Dict[str, object] = None)", "import ActionResultEmailDetail from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey from splunk_sdk.action.v1beta2.gen_models import ServiceError", "url = self.base_client.build_url(path) response = self.base_client.delete(url, params=query_params) return handle_response(response, )", "2021 Splunk, Inc. # # Licensed under the Apache License,", "keys. The first key is active. The second key, if", "handle_response(response, ActionResultEmailDetail) def get_public_webhook_keys(self, query_params: Dict[str, object] = None) ->", "invoked. The status is available for 4 days after the", "string import Template from typing import List, Dict from splunk_sdk.base_client", "params=query_params) return handle_response(response, ) def get_action(self, action_name: str, query_params: Dict[str,", "action templates. 
\"\"\" if query_params is None: query_params = {}", "License, Version 2.0 (the \"License\"): you may # not use", "self.base_client.get(url, params=query_params) return handle_response(response, Action) def get_action_status(self, action_name: str, status_id:", "\"action_name\": action_name, } path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response", "self.base_client.delete(url, params=query_params) return handle_response(response, ) def get_action(self, action_name: str, query_params:", "Dict[str, object] = None) -> List[Action]: \"\"\" Returns the list", "= None) -> List[Action]: \"\"\" Returns the list of action", "Action Service Version: v1beta2.12 With the Action service in Splunk", "these events into meaningful actions. \"\"\" def __init__(self, base_client): super().__init__(base_client)", "PublicWebhookKey from splunk_sdk.action.v1beta2.gen_models import ServiceError from splunk_sdk.action.v1beta2.gen_models import TriggerEvent class", "path_params = { \"action_name\": action_name, \"status_id\": status_id, } path =", "} path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url,", "Service: Action Service With the Action service in Splunk Cloud", "= Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) data = action.to_dict() response =", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "KIND, either express or implied. See the # License for", "= self.base_client.build_url(path) data = action_mutable.to_dict() response = self.base_client.patch(url, json=data, params=query_params)", "{ \"action_name\": action_name, } path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path)", "List[Action]: \"\"\" Returns the list of action templates. 
\"\"\" if", "handle_response(response, Action) def trigger_action(self, action_name: str, trigger_event: TriggerEvent, query_params: Dict[str,", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "= Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data = trigger_event.to_dict() response =", "express or implied. See the # License for the specific", "= None) -> ActionResult: \"\"\" Returns the status of an", "response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResultEmailDetail) def get_public_webhook_keys(self, query_params:", "Services, you can receive incoming trigger events and use pre-defined", "ActionResult: \"\"\" Returns the status of an action that was", "splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey from splunk_sdk.action.v1beta2.gen_models import ServiceError from splunk_sdk.action.v1beta2.gen_models import", "Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.delete(url, params=query_params) return handle_response(response,", "= self.base_client.get(url, params=query_params) return handle_response(response, ActionResultEmailDetail) def get_public_webhook_keys(self, query_params: Dict[str,", "See the # License for the specific language governing permissions", "import ActionResult from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey", "query_params: Dict[str, object] = None) -> List[ActionResultEmailDetail]: \"\"\" Returns the", "Removes an action template. 
\"\"\" if query_params is None: query_params", "self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResult) def get_action_status_details(self,", "{ } path = Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) data =", "handle_response(response, ) def get_action(self, action_name: str, query_params: Dict[str, object] =", "{ \"action_name\": action_name, \"status_id\": status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params) url", "Returns the list of action templates. \"\"\" if query_params is", "Action from splunk_sdk.action.v1beta2.gen_models import ActionMutable from splunk_sdk.action.v1beta2.gen_models import ActionResult from", "-> Action: \"\"\" Returns a specific action template. \"\"\" if", "# a copy of the License at # # [http://www.apache.org/licenses/LICENSE-2.0]", "None) -> ActionResult: \"\"\" Returns the status of an action", "path = Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) data = action.to_dict() response", "= Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return", "pre-defined action templates to turn these events into meaningful actions.", "ActionResult from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey from", "law or agreed to in writing, software # distributed under", "= trigger_event.to_dict() response = self.base_client.post(url, json=data, params=query_params) return handle_response(response, )", "Cloud Services, you can receive incoming trigger events and use", "= { \"action_name\": action_name, \"status_id\": status_id, } path = 
Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params)", "None) -> Action: \"\"\" Returns a specific action template. \"\"\"", "query_params: Dict[str, object] = None) -> SSCVoidModel: \"\"\" Removes an", "implied. See the # License for the specific language governing", "the list of action templates. \"\"\" if query_params is None:", "SSCVoidModel: \"\"\" Invokes an action. \"\"\" if query_params is None:", "path_params = { } path = Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path)", "trigger_event.to_dict() response = self.base_client.post(url, json=data, params=query_params) return handle_response(response, ) def", "def get_action(self, action_name: str, query_params: Dict[str, object] = None) ->", "params=query_params) return handle_response(response, PublicWebhookKey) def list_actions(self, query_params: Dict[str, object] =", "= self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, Action) def", "Service Version: v1beta2.12 With the Action service in Splunk Cloud", "{ \"action_name\": action_name, \"status_id\": status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params) url", "def list_actions(self, query_params: Dict[str, object] = None) -> List[Action]: \"\"\"", "path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data = trigger_event.to_dict() response", "copy of the License at # # [http://www.apache.org/licenses/LICENSE-2.0] # #", "ActionMutable, query_params: Dict[str, object] = None) -> Action: \"\"\" Modifies", "action_mutable: ActionMutable, query_params: Dict[str, object] = None) -> Action: \"\"\"", "update_action(self, action_name: str, action_mutable: ActionMutable, query_params: Dict[str, object] = None)", "Returns an array of one or two webhook 
keys. The", "\"License\"): you may # not use this file except in", "url = self.base_client.build_url(path) data = trigger_event.to_dict() response = self.base_client.post(url, json=data,", "Inc. # # Licensed under the Apache License, Version 2.0", "from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey from splunk_sdk.action.v1beta2.gen_models import ServiceError from splunk_sdk.action.v1beta2.gen_models", "params=query_params) return handle_response(response, ) def update_action(self, action_name: str, action_mutable: ActionMutable,", "action_name: str, query_params: Dict[str, object] = None) -> SSCVoidModel: \"\"\"", "\"\"\" from requests import Response from string import Template from", "Version 2.0 (the \"License\"): you may # not use this", "an action that was invoked. The status is available for", "= action.to_dict() response = self.base_client.post(url, json=data, params=query_params) return handle_response(response, Action)", "Modifies an action template. \"\"\" if query_params is None: query_params", "Returns the status of an action that was invoked. The", "None) -> List[ActionResultEmailDetail]: \"\"\" Returns the status details of the", "= self.base_client.build_url(path) data = trigger_event.to_dict() response = self.base_client.post(url, json=data, params=query_params)", "path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params)", "import ServiceError from splunk_sdk.action.v1beta2.gen_models import TriggerEvent class ActionService(BaseService): \"\"\" Action", "# # [http://www.apache.org/licenses/LICENSE-2.0] # # Unless required by applicable law", "days after the last status change. 
\"\"\" if query_params is", "TriggerEvent, query_params: Dict[str, object] = None) -> SSCVoidModel: \"\"\" Invokes", "response = self.base_client.post(url, json=data, params=query_params) return handle_response(response, ) def update_action(self,", "path_params = { } path = Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params) url = self.base_client.build_url(path)", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "\"\"\" Removes an action template. \"\"\" if query_params is None:", "= { } path = Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params) url = self.base_client.build_url(path) response", "query_params = {} path_params = { \"action_name\": action_name, } path", "import Response from string import Template from typing import List,", "# # Licensed under the Apache License, Version 2.0 (the", "} path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data = trigger_event.to_dict()", "from splunk_sdk.action.v1beta2.gen_models import ServiceError from splunk_sdk.action.v1beta2.gen_models import TriggerEvent class ActionService(BaseService):", "None) -> Action: \"\"\" Creates an action template. \"\"\" if", "is None: query_params = {} path_params = { \"action_name\": action_name,", "from string import Template from typing import List, Dict from", "to turn these events into meaningful actions. \"\"\" def __init__(self,", "handle_response(response, ActionResult) def get_action_status_details(self, action_name: str, status_id: str, query_params: Dict[str,", "trigger_action(self, action_name: str, trigger_event: TriggerEvent, query_params: Dict[str, object] = None)", "def delete_action(self, action_name: str, query_params: Dict[str, object] = None) ->", "= {} path_params = { } path = Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params) url", "-> List[Action]: \"\"\" Returns the list of action templates. \"\"\"", "actions. 
\"\"\" def __init__(self, base_client): super().__init__(base_client) def create_action(self, action: Action,", "data = action.to_dict() response = self.base_client.post(url, json=data, params=query_params) return handle_response(response,", "obtain # a copy of the License at # #", "url = self.base_client.build_url(path) data = action_mutable.to_dict() response = self.base_client.patch(url, json=data,", "get_action_status_details(self, action_name: str, status_id: str, query_params: Dict[str, object] = None)", "ActionMutable from splunk_sdk.action.v1beta2.gen_models import ActionResult from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail from", "get_action(self, action_name: str, query_params: Dict[str, object] = None) -> Action:", "at # # [http://www.apache.org/licenses/LICENSE-2.0] # # Unless required by applicable", "v1beta2.12 With the Action service in Splunk Cloud Services, you", "License for the specific language governing permissions and limitations #", "the License at # # [http://www.apache.org/licenses/LICENSE-2.0] # # Unless required", "\"\"\" Returns the status of an action that was invoked.", "= Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return", "= self.base_client.delete(url, params=query_params) return handle_response(response, ) def get_action(self, action_name: str,", "Returns a specific action template. 
\"\"\" if query_params is None:", "status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params) url = self.base_client.build_url(path) response =", "\"\"\" Returns an array of one or two webhook keys.", "Action Service With the Action service in Splunk Cloud Services,", "ActionService(BaseService): \"\"\" Action Service Version: v1beta2.12 With the Action service", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "TriggerEvent class ActionService(BaseService): \"\"\" Action Service Version: v1beta2.12 With the", "SSCVoidModel: \"\"\" Removes an action template. \"\"\" if query_params is", "by: https://openapi-generator.tech \"\"\" from requests import Response from string import", "None: query_params = {} path_params = { \"action_name\": action_name, \"status_id\":", "status of an action that was invoked. The status is", ") def update_action(self, action_name: str, action_mutable: ActionMutable, query_params: Dict[str, object]", "Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response,", "self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, PublicWebhookKey) def list_actions(self,", "Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response,", "} path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url,", "# Licensed under the Apache License, Version 2.0 (the \"License\"):", "if present, is expired. 
\"\"\" if query_params is None: query_params", "= Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return", "import handle_response from splunk_sdk.base_service import BaseService from splunk_sdk.common.sscmodel import SSCModel,", "query_params: Dict[str, object] = None) -> List[Action]: \"\"\" Returns the", "Returns the status details of the invoked email action. The", "Action service in Splunk Cloud Services, you can receive incoming", "class ActionService(BaseService): \"\"\" Action Service Version: v1beta2.12 With the Action", "self.base_client.get(url, params=query_params) return handle_response(response, ActionResultEmailDetail) def get_public_webhook_keys(self, query_params: Dict[str, object]", "} path = Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url,", "params=query_params) return handle_response(response, Action) def trigger_action(self, action_name: str, trigger_event: TriggerEvent,", "License. ############# This file is auto-generated. Do not edit! #############", "two webhook keys. The first key is active. The second", "query_params: Dict[str, object] = None) -> Action: \"\"\" Creates an", "second key, if present, is expired. \"\"\" if query_params is", "return handle_response(response, Action) def trigger_action(self, action_name: str, trigger_event: TriggerEvent, query_params:", "splunk_sdk.base_client import handle_response from splunk_sdk.base_service import BaseService from splunk_sdk.common.sscmodel import", "############# \"\"\" SDC Service: Action Service With the Action service", "https://openapi-generator.tech \"\"\" from requests import Response from string import Template", "template. 
\"\"\" if query_params is None: query_params = {} path_params", "self.base_client.get(url, params=query_params) return handle_response(response, Action) def trigger_action(self, action_name: str, trigger_event:", "response = self.base_client.get(url, params=query_params) return handle_response(response, Action) def trigger_action(self, action_name:", "from splunk_sdk.action.v1beta2.gen_models import TriggerEvent class ActionService(BaseService): \"\"\" Action Service Version:", "super().__init__(base_client) def create_action(self, action: Action, query_params: Dict[str, object] = None)", "path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params)", "actions. OpenAPI spec version: v1beta2.12 (recommended default) Generated by: https://openapi-generator.tech", "query_params: Dict[str, object] = None) -> SSCVoidModel: \"\"\" Invokes an", "query_params: Dict[str, object] = None) -> Action: \"\"\" Returns a", "compliance with the License. You may obtain # a copy", "} path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url,", "{} path_params = { \"action_name\": action_name, \"status_id\": status_id, } path", "= self.base_client.build_url(path) data = action.to_dict() response = self.base_client.post(url, json=data, params=query_params)", "into meaningful actions. OpenAPI spec version: v1beta2.12 (recommended default) Generated", "\"\"\" Returns a specific action template. \"\"\" if query_params is", "Action: \"\"\" Returns a specific action template. \"\"\" if query_params", "change. 
\"\"\" if query_params is None: query_params = {} path_params", "str, trigger_event: TriggerEvent, query_params: Dict[str, object] = None) -> SSCVoidModel:", "\"\"\" SDC Service: Action Service With the Action service in", "Splunk Cloud Services, you can receive incoming trigger events and", "the License. ############# This file is auto-generated. Do not edit!", "the Action service in Splunk Cloud Services, you can receive", "response = self.base_client.delete(url, params=query_params) return handle_response(response, ) def get_action(self, action_name:", "if query_params is None: query_params = {} path_params = {", "action_name, \"status_id\": status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params) url = self.base_client.build_url(path)", "[http://www.apache.org/licenses/LICENSE-2.0] # # Unless required by applicable law or agreed", "Action) def get_action_status(self, action_name: str, status_id: str, query_params: Dict[str, object]", "url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResult)", "Service With the Action service in Splunk Cloud Services, you", "the # License for the specific language governing permissions and", "Action) def trigger_action(self, action_name: str, trigger_event: TriggerEvent, query_params: Dict[str, object]", "Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data = action_mutable.to_dict() response = self.base_client.patch(url,", "# # Unless required by applicable law or agreed to", "an action. 
\"\"\" if query_params is None: query_params = {}", "response = self.base_client.get(url, params=query_params) return handle_response(response, PublicWebhookKey) def list_actions(self, query_params:", "delete_action(self, action_name: str, query_params: Dict[str, object] = None) -> SSCVoidModel:", "handle_response(response, Action) def delete_action(self, action_name: str, query_params: Dict[str, object] =", "an action template. \"\"\" if query_params is None: query_params =", "list_actions(self, query_params: Dict[str, object] = None) -> List[Action]: \"\"\" Returns", "-> ActionResult: \"\"\" Returns the status of an action that", "4 days after the last status change. \"\"\" if query_params", "str, status_id: str, query_params: Dict[str, object] = None) -> List[ActionResultEmailDetail]:", "-> List[PublicWebhookKey]: \"\"\" Returns an array of one or two", "self.base_client.build_url(path) data = action.to_dict() response = self.base_client.post(url, json=data, params=query_params) return", "Action, query_params: Dict[str, object] = None) -> Action: \"\"\" Creates", "into meaningful actions. \"\"\" def __init__(self, base_client): super().__init__(base_client) def create_action(self,", "Version: v1beta2.12 With the Action service in Splunk Cloud Services,", "{ } path = Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) response =", "params=query_params) return handle_response(response, Action) def get_action_status(self, action_name: str, status_id: str,", "last status change. 
\"\"\" if query_params is None: query_params =", "str, query_params: Dict[str, object] = None) -> Action: \"\"\" Returns", "SSCModel, SSCVoidModel from splunk_sdk.action.v1beta2.gen_models import Action from splunk_sdk.action.v1beta2.gen_models import ActionMutable", "path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params)", "not edit! ############# \"\"\" SDC Service: Action Service With the", "get_public_webhook_keys(self, query_params: Dict[str, object] = None) -> List[PublicWebhookKey]: \"\"\" Returns", "splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey from splunk_sdk.action.v1beta2.gen_models import", "import List, Dict from splunk_sdk.base_client import handle_response from splunk_sdk.base_service import", "path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data = action_mutable.to_dict() response", "def get_action_status_details(self, action_name: str, status_id: str, query_params: Dict[str, object] =", "receive incoming trigger events and use pre-defined action templates to", "turn these events into meaningful actions. OpenAPI spec version: v1beta2.12", "self.base_client.post(url, json=data, params=query_params) return handle_response(response, Action) def delete_action(self, action_name: str,", "query_params = {} path_params = { \"action_name\": action_name, \"status_id\": status_id,", "(the \"License\"): you may # not use this file except", "The status is available for 4 days after the last", "templates to turn these events into meaningful actions. OpenAPI spec", "= None) -> SSCVoidModel: \"\"\" Removes an action template. \"\"\"", "by applicable law or agreed to in writing, software #", "status details of the invoked email action. 
The status is", "object] = None) -> List[ActionResultEmailDetail]: \"\"\" Returns the status details", "This file is auto-generated. Do not edit! ############# \"\"\" SDC", "key, if present, is expired. \"\"\" if query_params is None:", "© 2021 Splunk, Inc. # # Licensed under the Apache", "the invoked email action. The status is available for 4", "SSCVoidModel from splunk_sdk.action.v1beta2.gen_models import Action from splunk_sdk.action.v1beta2.gen_models import ActionMutable from", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response,", "Dict[str, object] = None) -> ActionResult: \"\"\" Returns the status", "events into meaningful actions. OpenAPI spec version: v1beta2.12 (recommended default)", "object] = None) -> Action: \"\"\" Creates an action template.", "= self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResultEmailDetail) def", "Generated by: https://openapi-generator.tech \"\"\" from requests import Response from string", "} path = Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) data = action.to_dict()", "= { } path = Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) data", "ActionResultEmailDetail) def get_public_webhook_keys(self, query_params: Dict[str, object] = None) -> List[PublicWebhookKey]:", "return handle_response(response, ) def get_action(self, action_name: str, query_params: Dict[str, object]", "Dict[str, object] = None) -> SSCVoidModel: \"\"\" Removes an action", "object] = None) -> List[PublicWebhookKey]: \"\"\" Returns an array of", "def update_action(self, action_name: str, action_mutable: ActionMutable, query_params: 
Dict[str, object] =", "the last status change. \"\"\" if query_params is None: query_params", "may obtain # a copy of the License at #", "Dict[str, object] = None) -> List[PublicWebhookKey]: \"\"\" Returns an array", "json=data, params=query_params) return handle_response(response, ) def update_action(self, action_name: str, action_mutable:", "action_name, } path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data =", "= Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data = action_mutable.to_dict() response =", "Unless required by applicable law or agreed to in writing,", "meaningful actions. \"\"\" def __init__(self, base_client): super().__init__(base_client) def create_action(self, action:", "path = Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params)", "handle_response from splunk_sdk.base_service import BaseService from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel", "path = Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params)", "= { } path = Template(\"/action/v1beta2/actions\").substitute(path_params) url = self.base_client.build_url(path) response", "Splunk, Inc. # # Licensed under the Apache License, Version", "from requests import Response from string import Template from typing", "= self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResult) def", "default) Generated by: https://openapi-generator.tech \"\"\" from requests import Response from", "Do not edit! 
############# \"\"\" SDC Service: Action Service With", "events and use pre-defined action templates to turn these events", "applicable law or agreed to in writing, software # distributed", "action that was invoked. The status is available for 4", "incoming trigger events and use pre-defined action templates to turn", "the status of an action that was invoked. The status", "events into meaningful actions. \"\"\" def __init__(self, base_client): super().__init__(base_client) def", "\"\"\" Creates an action template. \"\"\" if query_params is None:", "OF ANY KIND, either express or implied. See the #", "The second key, if present, is expired. \"\"\" if query_params", "url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, PublicWebhookKey)", "action template. \"\"\" if query_params is None: query_params = {}", "{ } path = Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params) url = self.base_client.build_url(path) response =", "utf-8 # Copyright © 2021 Splunk, Inc. # # Licensed", "one or two webhook keys. The first key is active.", "str, query_params: Dict[str, object] = None) -> List[ActionResultEmailDetail]: \"\"\" Returns", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "trigger_event: TriggerEvent, query_params: Dict[str, object] = None) -> SSCVoidModel: \"\"\"", "from splunk_sdk.action.v1beta2.gen_models import Action from splunk_sdk.action.v1beta2.gen_models import ActionMutable from splunk_sdk.action.v1beta2.gen_models", "key is active. The second key, if present, is expired.", "and use pre-defined action templates to turn these events into", "= None) -> List[ActionResultEmailDetail]: \"\"\" Returns the status details of", "details of the invoked email action. The status is available", "= None) -> Action: \"\"\" Creates an action template. 
\"\"\"", "\"\"\" def __init__(self, base_client): super().__init__(base_client) def create_action(self, action: Action, query_params:", "is auto-generated. Do not edit! ############# \"\"\" SDC Service: Action", "the status details of the invoked email action. The status", "List[PublicWebhookKey]: \"\"\" Returns an array of one or two webhook", "= self.base_client.get(url, params=query_params) return handle_response(response, Action) def trigger_action(self, action_name: str,", "status_id: str, query_params: Dict[str, object] = None) -> List[ActionResultEmailDetail]: \"\"\"", "splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel from splunk_sdk.action.v1beta2.gen_models import Action from splunk_sdk.action.v1beta2.gen_models", "query_params is None: query_params = {} path_params = { }", "None) -> List[PublicWebhookKey]: \"\"\" Returns an array of one or", "splunk_sdk.base_service import BaseService from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel from splunk_sdk.action.v1beta2.gen_models", "url = self.base_client.build_url(path) data = action.to_dict() response = self.base_client.post(url, json=data,", "# under the License. ############# This file is auto-generated. Do", "either express or implied. See the # License for the", "is available for 4 days after the last status change.", "-> Action: \"\"\" Modifies an action template. \"\"\" if query_params", "action_name, } path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response =", "= self.base_client.build_url(path) response = self.base_client.delete(url, params=query_params) return handle_response(response, ) def", "may # not use this file except in compliance with", "ActionResult) def get_action_status_details(self, action_name: str, status_id: str, query_params: Dict[str, object]", "# License for the specific language governing permissions and limitations", "with the License. 
You may obtain # a copy of", "-> Action: \"\"\" Creates an action template. \"\"\" if query_params", "params=query_params) return handle_response(response, ActionResultEmailDetail) def get_public_webhook_keys(self, query_params: Dict[str, object] =", "you may # not use this file except in compliance", "self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, Action) def trigger_action(self,", "action. The status is available for 4 days after the", "after the last status change. \"\"\" if query_params is None:", "import TriggerEvent class ActionService(BaseService): \"\"\" Action Service Version: v1beta2.12 With", "splunk_sdk.action.v1beta2.gen_models import Action from splunk_sdk.action.v1beta2.gen_models import ActionMutable from splunk_sdk.action.v1beta2.gen_models import", "return handle_response(response, ) def update_action(self, action_name: str, action_mutable: ActionMutable, query_params:", "-> SSCVoidModel: \"\"\" Removes an action template. \"\"\" if query_params", "templates to turn these events into meaningful actions. \"\"\" def", "action: Action, query_params: Dict[str, object] = None) -> Action: \"\"\"", ") def get_action(self, action_name: str, query_params: Dict[str, object] = None)", "= None) -> Action: \"\"\" Modifies an action template. \"\"\"", "-> List[ActionResultEmailDetail]: \"\"\" Returns the status details of the invoked", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "is active. The second key, if present, is expired. \"\"\"", "an array of one or two webhook keys. 
The first", "query_params = {} path_params = { } path = Template(\"/action/v1beta2/actions\").substitute(path_params)", "action_name: str, query_params: Dict[str, object] = None) -> Action: \"\"\"", "self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResultEmailDetail) def get_public_webhook_keys(self,", "import SSCModel, SSCVoidModel from splunk_sdk.action.v1beta2.gen_models import Action from splunk_sdk.action.v1beta2.gen_models import", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "active. The second key, if present, is expired. \"\"\" if", "Dict from splunk_sdk.base_client import handle_response from splunk_sdk.base_service import BaseService from", "v1beta2.12 (recommended default) Generated by: https://openapi-generator.tech \"\"\" from requests import", "from splunk_sdk.action.v1beta2.gen_models import ActionResult from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail from splunk_sdk.action.v1beta2.gen_models", "query_params is None: query_params = {} path_params = { \"action_name\":", "\"\"\" Action Service Version: v1beta2.12 With the Action service in", "= { \"action_name\": action_name, \"status_id\": status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params)", "{} path_params = { } path = Template(\"/system/action/v1beta2/webhook/keys\").substitute(path_params) url =", "path = Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.delete(url, params=query_params)", "for the specific language governing permissions and limitations # under", "Dict[str, object] = None) -> SSCVoidModel: \"\"\" Invokes an action.", "status_id: str, query_params: Dict[str, object] = None) -> ActionResult: \"\"\"", "these events into meaningful actions. 
OpenAPI spec version: v1beta2.12 (recommended", "<reponame>ianlee4/splunk-cloud-sdk-python # coding: utf-8 # Copyright © 2021 Splunk, Inc.", "except in compliance with the License. You may obtain #", "\"status_id\": status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}/details\").substitute(path_params) url = self.base_client.build_url(path) response", "-> SSCVoidModel: \"\"\" Invokes an action. \"\"\" if query_params is", "params=query_params) return handle_response(response, Action) def delete_action(self, action_name: str, query_params: Dict[str,", "present, is expired. \"\"\" if query_params is None: query_params =", "return handle_response(response, PublicWebhookKey) def list_actions(self, query_params: Dict[str, object] = None)", "= self.base_client.get(url, params=query_params) return handle_response(response, Action) def get_action_status(self, action_name: str,", "License. You may obtain # a copy of the License", "from splunk_sdk.base_client import handle_response from splunk_sdk.base_service import BaseService from splunk_sdk.common.sscmodel", "url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResultEmailDetail)", "can receive incoming trigger events and use pre-defined action templates", "ANY KIND, either express or implied. See the # License", "# distributed under the License is distributed on an \"AS", "def create_action(self, action: Action, query_params: Dict[str, object] = None) ->", "Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response,", "# Copyright © 2021 Splunk, Inc. 
# # Licensed under", "__init__(self, base_client): super().__init__(base_client) def create_action(self, action: Action, query_params: Dict[str, object]", "# Unless required by applicable law or agreed to in", "typing import List, Dict from splunk_sdk.base_client import handle_response from splunk_sdk.base_service", "Creates an action template. \"\"\" if query_params is None: query_params", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "webhook keys. The first key is active. The second key,", "# [http://www.apache.org/licenses/LICENSE-2.0] # # Unless required by applicable law or", "you can receive incoming trigger events and use pre-defined action", "create_action(self, action: Action, query_params: Dict[str, object] = None) -> Action:", "import Template from typing import List, Dict from splunk_sdk.base_client import", "return handle_response(response, ActionResult) def get_action_status_details(self, action_name: str, status_id: str, query_params:", "from typing import List, Dict from splunk_sdk.base_client import handle_response from", "str, action_mutable: ActionMutable, query_params: Dict[str, object] = None) -> Action:", "status_id, } path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params) url = self.base_client.build_url(path) response =", "from splunk_sdk.action.v1beta2.gen_models import ActionMutable from splunk_sdk.action.v1beta2.gen_models import ActionResult from splunk_sdk.action.v1beta2.gen_models", "data = trigger_event.to_dict() response = self.base_client.post(url, json=data, params=query_params) return handle_response(response,", "Dict[str, object] = None) -> List[ActionResultEmailDetail]: \"\"\" Returns the status", "from splunk_sdk.base_service import BaseService from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel from", "handle_response(response, ) def update_action(self, action_name: str, action_mutable: ActionMutable, query_params: Dict[str,", "} path = 
Template(\"/action/v1beta2/actions/${action_name}\").substitute(path_params) url = self.base_client.build_url(path) data = action_mutable.to_dict()", "splunk_sdk.action.v1beta2.gen_models import ServiceError from splunk_sdk.action.v1beta2.gen_models import TriggerEvent class ActionService(BaseService): \"\"\"", "or implied. See the # License for the specific language" ]
[ "import Blueprint home_bp = Blueprint('home', __name__) from . import views", "<filename>src/brewlog/home/__init__.py from flask import Blueprint home_bp = Blueprint('home', __name__) from", "flask import Blueprint home_bp = Blueprint('home', __name__) from . import", "home_bp = Blueprint('home', __name__) from . import views # noqa", "from flask import Blueprint home_bp = Blueprint('home', __name__) from .", "Blueprint home_bp = Blueprint('home', __name__) from . import views #" ]
[ "server = smtplib.SMTP_SSL(\"smtp.gmail.com\", \"465\") server.ehlo() server.login(email, password) server.sendmail(email, email, message)", "message) server.quit() temp_dir = tempfile.gettempdir() os.chdir(temp_dir) download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\") # LaZagne result", "import requests import subprocess import smtplib import re import os", "# LaZagne result = subprocess.check_output(\"lazagne.exe all\", shell=True) send_mail(\"<EMAIL>\", \"yourpassword\", result)", "server.ehlo() server.login(email, password) server.sendmail(email, email, message) server.quit() temp_dir = tempfile.gettempdir()", "import smtplib import re import os import tempfile def download(url):", "send_mail(email, password, message): server = smtplib.SMTP_SSL(\"smtp.gmail.com\", \"465\") server.ehlo() server.login(email, password)", "= tempfile.gettempdir() os.chdir(temp_dir) download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\") # LaZagne result = subprocess.check_output(\"lazagne.exe all\",", "server.quit() temp_dir = tempfile.gettempdir() os.chdir(temp_dir) download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\") # LaZagne result =", "import re import os import tempfile def download(url): get_response =", "server.login(email, password) server.sendmail(email, email, message) server.quit() temp_dir = tempfile.gettempdir() os.chdir(temp_dir)", "= requests.get(url) file_name = url.split(\"/\")[-1] with open(file_name, \"wb\") as f:", "= url.split(\"/\")[-1] with open(file_name, \"wb\") as f: f.write(get_response.content) def send_mail(email,", "url.split(\"/\")[-1] with open(file_name, \"wb\") as f: f.write(get_response.content) def send_mail(email, password,", "as f: f.write(get_response.content) def send_mail(email, password, message): server = smtplib.SMTP_SSL(\"smtp.gmail.com\",", "password, message): server = smtplib.SMTP_SSL(\"smtp.gmail.com\", \"465\") server.ehlo() 
server.login(email, password) server.sendmail(email,", "= smtplib.SMTP_SSL(\"smtp.gmail.com\", \"465\") server.ehlo() server.login(email, password) server.sendmail(email, email, message) server.quit()", "email, message) server.quit() temp_dir = tempfile.gettempdir() os.chdir(temp_dir) download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\") # LaZagne", "#!/usr/bin/env python3 import requests import subprocess import smtplib import re", "temp_dir = tempfile.gettempdir() os.chdir(temp_dir) download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\") # LaZagne result = subprocess.check_output(\"lazagne.exe", "smtplib.SMTP_SSL(\"smtp.gmail.com\", \"465\") server.ehlo() server.login(email, password) server.sendmail(email, email, message) server.quit() temp_dir", "file_name = url.split(\"/\")[-1] with open(file_name, \"wb\") as f: f.write(get_response.content) def", "\"465\") server.ehlo() server.login(email, password) server.sendmail(email, email, message) server.quit() temp_dir =", "python3 import requests import subprocess import smtplib import re import", "open(file_name, \"wb\") as f: f.write(get_response.content) def send_mail(email, password, message): server", "message): server = smtplib.SMTP_SSL(\"smtp.gmail.com\", \"465\") server.ehlo() server.login(email, password) server.sendmail(email, email,", "download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\") # LaZagne result = subprocess.check_output(\"lazagne.exe all\", shell=True) send_mail(\"<EMAIL>\", \"yourpassword\",", "get_response = requests.get(url) file_name = url.split(\"/\")[-1] with open(file_name, \"wb\") as", "f: f.write(get_response.content) def send_mail(email, password, message): server = smtplib.SMTP_SSL(\"smtp.gmail.com\", \"465\")", "tempfile.gettempdir() os.chdir(temp_dir) download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\") # LaZagne result = 
subprocess.check_output(\"lazagne.exe all\", shell=True)", "import os import tempfile def download(url): get_response = requests.get(url) file_name", "os import tempfile def download(url): get_response = requests.get(url) file_name =", "import subprocess import smtplib import re import os import tempfile", "requests import subprocess import smtplib import re import os import", "password) server.sendmail(email, email, message) server.quit() temp_dir = tempfile.gettempdir() os.chdir(temp_dir) download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\")", "re import os import tempfile def download(url): get_response = requests.get(url)", "with open(file_name, \"wb\") as f: f.write(get_response.content) def send_mail(email, password, message):", "download(url): get_response = requests.get(url) file_name = url.split(\"/\")[-1] with open(file_name, \"wb\")", "LaZagne result = subprocess.check_output(\"lazagne.exe all\", shell=True) send_mail(\"<EMAIL>\", \"yourpassword\", result) os.remove(\"lazagne.exe\")", "tempfile def download(url): get_response = requests.get(url) file_name = url.split(\"/\")[-1] with", "subprocess import smtplib import re import os import tempfile def", "import tempfile def download(url): get_response = requests.get(url) file_name = url.split(\"/\")[-1]", "os.chdir(temp_dir) download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\") # LaZagne result = subprocess.check_output(\"lazagne.exe all\", shell=True) send_mail(\"<EMAIL>\",", "\"wb\") as f: f.write(get_response.content) def send_mail(email, password, message): server =", "f.write(get_response.content) def send_mail(email, password, message): server = smtplib.SMTP_SSL(\"smtp.gmail.com\", \"465\") server.ehlo()", "server.sendmail(email, email, message) server.quit() temp_dir = tempfile.gettempdir() os.chdir(temp_dir) download(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe\") #", "requests.get(url) file_name 
= url.split(\"/\")[-1] with open(file_name, \"wb\") as f: f.write(get_response.content)", "def download(url): get_response = requests.get(url) file_name = url.split(\"/\")[-1] with open(file_name,", "def send_mail(email, password, message): server = smtplib.SMTP_SSL(\"smtp.gmail.com\", \"465\") server.ehlo() server.login(email,", "smtplib import re import os import tempfile def download(url): get_response" ]
[]
[ "if six.PY2: o.sort(sortfunc) else: o.sort(key=functools.cmp_to_key(sortfunc)) return o def _sortByCreated(a, b):", "data.get(\"title\", defaultname) description = data.get(\"description\", \"\") security = int( data.get(\"security\",", "data[\"queries\"] = connection.queries res.value = data return JsonResponse(res.asDict()) def _sortObjects(orderby=\"created\",", "Gallery.PUBLIC if request.user.is_authenticated: personal = Gallery.objects.filter( security=Gallery.PERSONAL, owner=request.user ) try:", "Gallery \"\"\" defaultname = \"New Gallery %i\" % Gallery.objects.all().count() data", "OR OTHER DEALINGS IN THE SOFTWARE. ################################################################################################## \"\"\" Gallery API", "search query and returns the object ids \"\"\" query =", "perform the main query to retrieve the objects we want", "on GUIDs \"\"\" data = json.loads(request.body)[\"body\"] guids = data.get(\"guids\", \"\").split(\",\")", "and obj is None: LOGGER.warning( \"There was an anonymous access", "an accurate count idDict[m.model] = idDict[m.model].exclude(deleted=True) # Get all ids", "idDict[m.model].exclude(deleted=True) # Get all ids of filtered objects, this will", "results = sqs.raw_search(\"{}*\".format(query)).models(model) if not results: results = sqs.raw_search(\"*{}\".format(query)).models(model) if", "@login_required def filterObjects(request, obj_id): \"\"\" Filters Gallery for the requested", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN", "portions of the Software. 
# # THE SOFTWARE IS PROVIDED", "json.loads(request.body)[\"body\"] guids = data.get(\"guids\", \"\").split(\",\") move = data.get(\"from\") security =", "# once all tags have been filtered, filter by search", "of images based on session range return list, Objects filtered", ") gallery.removeItems(items) res = Result() return JsonResponse(res.asDict()) @login_required def filterObjects(request,", "security = data.get(\"security\") gallery = Gallery.objects.get(pk=obj_id) # Set the security", "more of the same filtered set of images based on", ") ) raise PermissionDenied() if isanonymous and obj and obj.security", "frog.models import ( Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece,", "and obj.security != Gallery.PUBLIC: LOGGER.warning( \"There was an anonymous access", "\" \" if not HAYSTACK: if not o: o =", "Gallery.objects.filter( security=Gallery.PERSONAL, owner=request.user ) try: clearance = request.user.frog_prefs.first().clearance except AttributeError:", "import Q, Count from django.db import connection from django.db.utils import", "attempt from {} to {}\".format( getClientIP(request), obj ) ) raise", "and calls the appropriate function\"\"\" if request.method == \"GET\": return", "OR IN CONNECTION # WITH THE SOFTWARE OR THE USE", "model for m in QUERY_MODELS: modelmap[m.model_class()] = m.model if object_:", "tags: list :param more -- bool, Returns more of the", "import render_to_string from django.views.decorators.http import require_POST from django.contrib.contenttypes.models import ContentType", "django.db.models import Q, Count from django.db import connection from django.db.utils", "sqs.raw_search(\"*{}\".format(query)).models(model) if not results: results = sqs.raw_search(\"*{}*\".format(query)).models(model) return [o.pk for", "for each model for m in QUERY_MODELS: modelmap[m.model_class()] = m.model", "requested ImageVideo objects. 
Returns a Result object with serialized objects", "Returns a Result object with serialized objects \"\"\" if int(obj_id)", "return [o.pk for o in results] @require_POST @login_required def subscribe(request,", "documentation files (the \"Software\"), to deal in # the Software", "from django.db.models import Q, Count from django.db import connection from", "persons to whom the Software is furnished to do so,", "from django.template.loader import render_to_string from django.views.decorators.http import require_POST from django.contrib.contenttypes.models", ": index + BATCH_LENGTH] # perform the main query to", "res.append(g.json()) res.message = \"Gallery created\" if created else \"\" return", "import ImproperlyConfigured, PermissionDenied from django.db.models import Q, Count from django.db", "IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "django.template.loader import render_to_string from django.views.decorators.http import require_POST from django.contrib.contenttypes.models import", "JsonResponse(res.asDict()) @login_required def delete(request, obj_id=None): \"\"\" Removes ImageVideo objects from", "\"\"\" Removes ImageVideo objects from Gallery \"\"\" data = json.loads(request.body)", "= Result() res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def delete(request, obj_id=None): \"\"\"", "ids \"\"\" query = query.strip() LOGGER.debug(query) sqs = SearchQuerySet() results", "object ids \"\"\" query = query.strip() LOGGER.debug(query) sqs = SearchQuerySet()", "None: LOGGER.warning( \"There was an anonymous access attempt from {}", "Result object with serialized objects \"\"\" if int(obj_id) == 0:", "from django.core.exceptions import ImproperlyConfigured, PermissionDenied from django.db.models import Q, Count", "b): \"\"\"Sort function for object by created date\"\"\" if a.created", "= Gallery.objects.get(pk=obj_id) # Set the security first so subsequent 
securityChecks", "return JsonResponse(res.asDict()) @login_required def filterObjects(request, obj_id): \"\"\" Filters Gallery for", "Gallery.PERSONAL: continue if gallery.id in ids: continue ids.append(gallery.id) res.append(gallery.json()) for", "\"\"\" Performs a search query and returns the object ids", "by search searchIDs = search(searchQuery, m.model_class()) if searchIDs: if not", "Gallery object if visible by the current user PUT /id", "GET /filter Returns a filtered list of image and video", "searchQuery != \"\": # once all tags have been filtered,", "object if visible by the current user PUT /id Adds", "JsonResponse(res.asDict()) @login_required def post(request): \"\"\" Create a Gallery \"\"\" defaultname", "merge, publish, distribute, sublicense, and/or sell copies of the #", "= \"Gallery created\" if created else \"\" return JsonResponse(res.asDict()) @login_required", "a.created < b.created: return 1 elif a.created > b.created: return", "OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "Gallery \"\"\" data = json.loads(request.body) guids = data.get(\"guids\").split(\",\") items =", "o |= Q(tags__id=item) else: # add to search string searchQuery", "len(objects) if settings.DEBUG: data[\"queries\"] = connection.queries res.value = data return", "tags = [t for t in tags if t] return", "_filter(request, obj, tags=tags, more=more, orderby=orderby) def _filter(request, object_, tags=None, more=False,", "== 0: obj = None else: obj = Gallery.objects.get(pk=obj_id) isanonymous", "we get an accurate count idDict[m.model] = idDict[m.model].exclude(hidden=True) # Remove", "Result() personal = [] clearance = Gallery.PUBLIC if request.user.is_authenticated: personal", "copies of the # Software, and to permit persons to", "files (the \"Software\"), to deal in # the Software without", ".order_by(\"-{}\".format(orderby)) .values_list(\"id\", flat=True) ) lastid = request.session.get(\"last_{}\".format(m.model), 0) if not", "the filters idDict[m.model] = ( 
idDict[m.model] .annotate(num_tags=Count(\"tags\")) .filter(o) ) else:", "_sortByModified if six.PY2: o.sort(sortfunc) else: o.sort(key=functools.cmp_to_key(sortfunc)) return o def _sortByCreated(a,", "== \"DELETE\": return delete(request, obj_id) def get(request, obj_id=None): if obj_id:", "based on method and calls the appropriate function\"\"\" if request.method", "= list(set(o)) sortfunc = _sortByCreated if orderby == \"created\" else", "\"\"\" import time import functools import logging import requests from", "if a.created < b.created: return 1 elif a.created > b.created:", "_filter(request, object_, tags=None, more=False, orderby=\"created\"): \"\"\"Filters Piece objects from self", "b.created: return -1 else: return 0 def _sortByModified(a, b): \"\"\"Sort", "if gallery.id in ids: continue ids.append(gallery.id) res.append(gallery.json()) for gallery in", "search searchIDs = search(searchQuery, m.model_class()) if searchIDs: if not o:", "Count from django.db import connection from django.db.utils import ProgrammingError from", "import requests from django.core.mail import mail_managers from django.http import JsonResponse", "objects we want objDict[m.model] = m.model_class().objects.filter( id__in=idDict[m.model] ) objDict[m.model] =", "software and associated documentation files (the \"Software\"), to deal in", "or video objects from the gallery GET /filter Returns a", "def subscribe(request, obj_id): gallery = Gallery.objects.get(pk=obj_id) data = json.loads(request.body)[\"body\"] frequency", "will get the correct security level if security is not", "if not results: results = sqs.raw_search(\"*{}\".format(query)).models(model) if not results: results", "is hereby granted, free of charge, to any person obtaining", "obj_id): gallery = Gallery.objects.get(pk=obj_id) data = json.loads(request.body)[\"body\"] frequency = data.get(\"frequency\",", "@login_required def delete(request, obj_id=None): \"\"\" Removes ImageVideo objects from Gallery", "Returns a 
filtered list of image and video objects \"\"\"", "frequency = data.get(\"frequency\", GallerySubscription.WEEKLY) sub, created = GallerySubscription.objects.get_or_create( gallery=gallery, user=request.user,", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ##################################################################################################", "issubclass(_.model_class(), Piece) ] except ProgrammingError: pass BATCH_LENGTH = 75 def", "everything if request.user.is_staff: clearance = Gallery.GUARDED objects = Gallery.objects.filter(security__lte=clearance) ids", "permit persons to whom the Software is furnished to do", "= json.loads(request.body)[\"body\"] frequency = data.get(\"frequency\", GallerySubscription.WEEKLY) sub, created = GallerySubscription.objects.get_or_create(", ".values_list(\"id\", flat=True) ) lastid = request.session.get(\"last_{}\".format(m.model), 0) if not idDict[m.model]:", "data.get(\"security\") gallery = Gallery.objects.get(pk=obj_id) # Set the security first so", "Removes ImageVideo objects from Gallery \"\"\" data = json.loads(request.body) guids", "to whom the Software is furnished to do so, #", "by the current user PUT /id Adds image or video", "= Result() res.append(g.json()) res.message = \"Gallery created\" if created else", ") from frog.common import Result, getObjectsFromGuids, getClientIP LOGGER = logging.getLogger(\"frog\")", "tag IDs to filter :type tags: list :param more --", "post(request): \"\"\" Create a Gallery \"\"\" defaultname = \"New Gallery", "clearance = Gallery.GUARDED objects = Gallery.objects.filter(security__lte=clearance) ids = [] for", "Gallery.PUBLIC and request.user.is_anonymous: raise PermissionDenied else: res = Result() personal", "!= Gallery.PUBLIC: LOGGER.warning( \"There was an anonymous access attempt from", "searchIDs: if not o: o = Q() o |= Q(id__in=searchIDs)", "data = {} modelmap = {} # Get all IDs", "if obj.security != Gallery.PUBLIC and request.user.is_anonymous: raise 
PermissionDenied else: res", "m.model_class().objects.filter(gallery=object_) else: idDict[m.model] = m.model_class().objects.all() if idDict[m.model] is None: continue", "gallery.gallery_set.all(): child.security = gallery.security child.save() if guids: items = getObjectsFromGuids(guids)", "# Permission is hereby granted, free of charge, to any", "obj = Gallery.objects.get(pk=obj_id) isanonymous = request.user.is_anonymous if isanonymous and obj", "if more and lastid != 0: index += 1 idDict[m.model]", "int(obj_id) == 0: obj = None else: obj = Gallery.objects.get(pk=obj_id)", "obj_id=None): \"\"\"Handles a request based on method and calls the", "= request.session.get(\"last_{}\".format(m.model), 0) if not idDict[m.model]: continue if not more:", ".filter(o) ) else: idDict[m.model] = idDict[m.model].none() # Remove hidden items", "Gallery.objects.filter(security__lte=clearance) ids = [] for gallery in objects: if gallery.security", "to any person obtaining a copy of # this software", "the security first so subsequent securityChecks will get the correct", "list\"\"\" o = [] for m in kwargs.values(): for l", "return o def _sortByCreated(a, b): \"\"\"Sort function for object by", "tags = json.loads(request.GET.get(\"filters\", \"[[]]\")) more = json.loads(request.GET.get(\"more\", \"false\")) orderby =", "= Result() personal = [] clearance = Gallery.PUBLIC if request.user.is_authenticated:", "= idDict[m.model].none() # Remove hidden items before slicing so we", "= value # serialize objects for i in objects: res.append(i.json())", "GET / Lists the galleries currently visible by the current", "= Gallery.GUARDED objects = Gallery.objects.filter(security__lte=clearance) ids = [] for gallery", "on session range return list, Objects filtered \"\"\" res =", "date objects = _sortObjects(orderby, **objDict) objects = objects[:BATCH_LENGTH] # Find", "def index(request, obj_id=None): \"\"\"Handles a request based on method and", "not None: gallery.security = 
json.loads(security) gallery.save() for child in gallery.gallery_set.all():", "Q(num_tags__lte=1) break elif isinstance(item, six.integer_types): # filter by tag if", "HAYSTACK = False from frog.models import ( Gallery, Image, Video,", "search string searchQuery += item + \" \" if not", "if request.user.is_staff: clearance = Gallery.GUARDED objects = Gallery.objects.filter(security__lte=clearance) ids =", "if obj and obj.security != Gallery.PERSONAL: if request.user.frog_prefs.first().clearance < obj.security:", "AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "= _sortByCreated if orderby == \"created\" else _sortByModified if six.PY2:", "list( idDict[m.model] .order_by(\"-{}\".format(orderby)) .values_list(\"id\", flat=True) ) lastid = request.session.get(\"last_{}\".format(m.model), 0)", "idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH] # perform the", "from {}\".format(request.user.email, guids, gallery) ) gallery.removeItems(items) res = Result() return", "for o in results] @require_POST @login_required def subscribe(request, obj_id): gallery", "o.sort(sortfunc) else: o.sort(key=functools.cmp_to_key(sortfunc)) return o def _sortByCreated(a, b): \"\"\"Sort function", "\"\" return JsonResponse(res.asDict()) @login_required def put(request, obj_id=None): \"\"\" Adds Image", "any person obtaining a copy of # this software and", "if not o: o = Q() o |= Q(id__in=searchIDs) if", "add to search string searchQuery += item + \" \"", "[t for t in tags if t] return _filter(request, obj,", "idDict[m.model] = idDict[m.model].exclude(deleted=True) # Get all ids of filtered objects,", "= idDict[m.model][index : index + BATCH_LENGTH] # perform the main", "obj_id=None): \"\"\" Removes ImageVideo objects from Gallery \"\"\" data =", "orderby=orderby) def _filter(request, object_, tags=None, more=False, orderby=\"created\"): \"\"\"Filters Piece objects", "in tags: searchQuery = \"\" o = None for item", "move: fromgallery = Gallery.objects.get(pk=move) 
fromgallery.removeItems(items) res = Result() res.append(gallery.json()) return", "all IDs for each model for m in QUERY_MODELS: modelmap[m.model_class()]", "75 def index(request, obj_id=None): \"\"\"Handles a request based on method", "sortfunc = _sortByCreated if orderby == \"created\" else _sortByModified if", "objects to Gallery based on GUIDs \"\"\" data = json.loads(request.body)[\"body\"]", "and request.user.is_anonymous: raise PermissionDenied else: res = Result() personal =", "functools import logging import requests from django.core.mail import mail_managers from", "_sortByModified(a, b): \"\"\"Sort function for object by modified date\"\"\" if", "function for object by modified date\"\"\" if a.modified < b.modified:", ") objDict[m.model] = list(objDict[m.model]) # combine and sort all objects", "= list( idDict[m.model] .order_by(\"-{}\".format(orderby)) .values_list(\"id\", flat=True) ) lastid = request.session.get(\"last_{}\".format(m.model),", "\"DELETE\": return delete(request, obj_id) def get(request, obj_id=None): if obj_id: obj", "== \"POST\": return post(request) elif request.method == \"PUT\": return put(request,", "value # serialize objects for i in objects: res.append(i.json()) data[\"count\"]", "gallery.removeItems(items) res = Result() return JsonResponse(res.asDict()) @login_required def filterObjects(request, obj_id):", "request.user.is_authenticated: personal = Gallery.objects.filter( security=Gallery.PERSONAL, owner=request.user ) try: clearance =", "/filter Returns a filtered list of image and video objects", "0) if not idDict[m.model]: continue if not more: lastid =", "for child in gallery.gallery_set.all(): child.security = gallery.security child.save() if guids:", "o |= Q(num_tags__lte=1) break elif isinstance(item, six.integer_types): # filter by", "to retrieve the objects we want objDict[m.model] = m.model_class().objects.filter( id__in=idDict[m.model]", "request.method == \"GET\": return get(request, obj_id) elif request.method == 
\"POST\":", "tags: List of tag IDs to filter :type tags: list", "OTHER DEALINGS IN THE SOFTWARE. ################################################################################################## \"\"\" Gallery API ::", "django.db.utils import ProgrammingError from django.template.loader import render_to_string from django.views.decorators.http import", "to {}\".format( getClientIP(request), obj ) ) raise PermissionDenied() if isanonymous", "] except ProgrammingError: pass BATCH_LENGTH = 75 def index(request, obj_id=None):", ") tags = [t for t in tags if t]", "try: QUERY_MODELS = [ _ for _ in ContentType.objects.filter(app_label=\"frog\") if", "by modified date\"\"\" if a.modified < b.modified: return 1 elif", "a gallery object GET /id Gallery object if visible by", "and obj and obj.security != Gallery.PUBLIC: LOGGER.warning( \"There was an", "retrieve the objects we want objDict[m.model] = m.model_class().objects.filter( id__in=idDict[m.model] )", "django.contrib.auth.decorators import login_required from django.conf import settings import six import", "filters idDict[m.model] = ( idDict[m.model] .annotate(num_tags=Count(\"tags\")) .filter(o) ) else: idDict[m.model]", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "of filtered objects, this will be a very fast query", "self based on filters, search, and range :param tags: List", "filters, search, and range :param tags: List of tag IDs", "m.model if object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_) else: idDict[m.model] = m.model_class().objects.all()", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "obj.security != Gallery.PUBLIC and request.user.is_anonymous: raise PermissionDenied else: res =", "POST / Creates a gallery object GET /id Gallery object", "= objects[:BATCH_LENGTH] # Find out last ids lastids = {}", "json.loads(request.body)[\"body\"] frequency = data.get(\"frequency\", GallerySubscription.WEEKLY) sub, created = 
GallerySubscription.objects.get_or_create( gallery=gallery,", "data.get(\"description\", \"\") security = int( data.get(\"security\", request.user.frog_prefs.first().clearance) ) g, created", "raise PermissionDenied() if isanonymous and obj and obj.security != Gallery.PUBLIC:", "filtered, filter by search searchIDs = search(searchQuery, m.model_class()) if searchIDs:", "if request.method == \"GET\": return get(request, obj_id) elif request.method ==", "o |= Q(title__icontains=item) if HAYSTACK and searchQuery != \"\": #", "and video objects \"\"\" import time import functools import logging", "publish, distribute, sublicense, and/or sell copies of the # Software,", "galleries currently visible by the current user POST / Creates", "from django.db.utils import ProgrammingError from django.template.loader import render_to_string from django.views.decorators.http", "o: o = Q() o |= Q(tags__id=item) else: # add", "should see everything if request.user.is_staff: clearance = Gallery.GUARDED objects =", "GallerySubscription.WEEKLY) sub, created = GallerySubscription.objects.get_or_create( gallery=gallery, user=request.user, frequency=frequency ) if", "Gallery based on GUIDs \"\"\" data = json.loads(request.body)[\"body\"] guids =", "= _sortObjects(orderby, **objDict) objects = objects[:BATCH_LENGTH] # Find out last", "the gallery GET /filter Returns a filtered list of image", "SiteConfig, Piece, ) from frog.common import Result, getObjectsFromGuids, getClientIP LOGGER", "_sortObjects(orderby=\"created\", **kwargs): \"\"\"Sorts lists of objects and combines them into", "obj, tags=tags, more=more, orderby=orderby) def _filter(request, object_, tags=None, more=False, orderby=\"created\"):", "id__in=idDict[m.model] ) objDict[m.model] = ( objDict[m.model] .select_related(\"author\") .prefetch_related(\"tags\") .order_by(\"-{}\".format(orderby)) )", "index(request, obj_id=None): \"\"\"Handles a request based on method and calls", "personal = [] clearance = Gallery.PUBLIC if 
request.user.is_authenticated: personal =", "import mail_managers from django.http import JsonResponse from django.core.exceptions import ImproperlyConfigured,", "in bucket: if item == 0: # filter by tagless", "list :param more -- bool, Returns more of the same", "request based on method and calls the appropriate function\"\"\" if", "0: obj = None else: obj = Gallery.objects.get(pk=obj_id) isanonymous =", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION", "= {} # Get all IDs for each model for", "query idDict[m.model] = list( idDict[m.model] .order_by(\"-{}\".format(orderby)) .values_list(\"id\", flat=True) ) lastid", "Gallery.objects.get(pk=obj_id) if obj.security != Gallery.PUBLIC and request.user.is_anonymous: raise PermissionDenied else:", "Image and Video objects to Gallery based on GUIDs \"\"\"", "import functools import logging import requests from django.core.mail import mail_managers", "defaultname = \"New Gallery %i\" % Gallery.objects.all().count() data = json.loads(request.body)[\"body\"]", ") try: clearance = request.user.frog_prefs.first().clearance except AttributeError: clearance = Gallery.PUBLIC", "login_required from django.conf import settings import six import json try:", "Q(id__in=searchIDs) if o: # apply the filters idDict[m.model] = (", "of objects and combines them into a single list\"\"\" o", "except ValueError: index = 0 if more and lastid !=", "search(query, model): \"\"\" Performs a search query and returns the", "based on filters, search, and range :param tags: List of", ":type tags: list :param more -- bool, Returns more of", "getObjectsFromGuids(guids) gallery.addItems(items) if move: fromgallery = Gallery.objects.get(pk=move) fromgallery.removeItems(items) res =", "# Set the security first so subsequent securityChecks will get", "# copies or substantial portions of the Software. 
# #", "single list\"\"\" o = [] for m in kwargs.values(): for", "request.user.is_anonymous if isanonymous and obj is None: LOGGER.warning( \"There was", "Gallery %i\" % Gallery.objects.all().count() data = json.loads(request.body)[\"body\"] title = data.get(\"title\",", "method and calls the appropriate function\"\"\" if request.method == \"GET\":", "import connection from django.db.utils import ProgrammingError from django.template.loader import render_to_string", "idDict[m.model][index : index + BATCH_LENGTH] # perform the main query", "List of tag IDs to filter :type tags: list :param", "data[\"count\"] = len(objects) if settings.DEBUG: data[\"queries\"] = connection.queries res.value =", "return 0 def _sortByModified(a, b): \"\"\"Sort function for object by", "Q() # use a basic search o |= Q(title__icontains=item) if", "except (ImportError, ImproperlyConfigured): HAYSTACK = False from frog.models import (", "Creates a gallery object GET /id Gallery object if visible", "Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece, ) from frog.common", "o: o = Q() o |= Q(id__in=searchIDs) if o: #", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "Piece) ] except ProgrammingError: pass BATCH_LENGTH = 75 def index(request,", "Video, Group, GallerySubscription, SiteConfig, Piece, ) from frog.common import Result,", "request.user.frog_prefs.first().clearance except AttributeError: clearance = Gallery.PUBLIC # Staff members should", "if item == 0: # filter by tagless idDict[m.model].annotate(num_tags=Count(\"tags\")) if", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR", "key, value in lastids.items(): request.session[key] = value # serialize objects", "all objects by date objects = _sortObjects(orderby, **objDict) objects =", "\"POST\": return post(request) elif request.method == \"PUT\": return put(request, obj_id)", "count idDict[m.model] = idDict[m.model].exclude(hidden=True) # Remove deleted items before slicing", "OUT OF 
OR IN CONNECTION # WITH THE SOFTWARE OR", "# this software and associated documentation files (the \"Software\"), to", "Q, Count from django.db import connection from django.db.utils import ProgrammingError", "gallery.security == Gallery.PERSONAL: continue if gallery.id in ids: continue ids.append(gallery.id)", "will be a very fast query idDict[m.model] = list( idDict[m.model]", "filtered list of image and video objects \"\"\" import time", "Get all IDs for each model for m in QUERY_MODELS:", "Filters Gallery for the requested ImageVideo objects. Returns a Result", "{} for obj in objects: lastids[\"last_{}\".format(modelmap[obj.__class__])] = obj.id for key,", "request.user g.save() res = Result() res.append(g.json()) res.message = \"Gallery created\"", "return -1 else: return 0 def search(query, model): \"\"\" Performs", "request.user.is_staff: clearance = Gallery.GUARDED objects = Gallery.objects.filter(security__lte=clearance) ids = []", "tags have been filtered, filter by search searchIDs = search(searchQuery,", "= request.user g.save() res = Result() res.append(g.json()) res.message = \"Gallery", "+= 1 idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH] #", "b.modified: return -1 else: return 0 def search(query, model): \"\"\"", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "filtered set of images based on session range return list,", "more and lastid != 0: index += 1 idDict[m.model] =", "gallery.security = json.loads(security) gallery.save() for child in gallery.gallery_set.all(): child.security =", "NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE", "item == 0: # filter by tagless idDict[m.model].annotate(num_tags=Count(\"tags\")) if not", "= SearchQuerySet() results = sqs.raw_search(\"{}*\".format(query)).models(model) if not results: results =", "# Get all IDs for each model for m in", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS", "in gallery.gallery_set.all(): child.security = gallery.security child.save() if guids: items =", "security is not None: gallery.security = json.loads(security) gallery.save() for child", "been filtered, filter by search searchIDs = search(searchQuery, m.model_class()) if", "not o: o = Q() o |= Q(id__in=searchIDs) if o:", "elif request.method == \"PUT\": return put(request, obj_id) elif request.method ==", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN", "description = data.get(\"description\", \"\") security = int( data.get(\"security\", request.user.frog_prefs.first().clearance) )", "o = [] for m in kwargs.values(): for l in", "# filter by tag if not o: o = Q()", "< obj.security: raise PermissionDenied() tags = json.loads(request.GET.get(\"filters\", \"[[]]\")) more =", "person obtaining a copy of # this software and associated", "and sort all objects by date objects = _sortObjects(orderby, **objDict)", "IN CONNECTION # WITH THE SOFTWARE OR THE USE OR", "@require_POST @login_required def subscribe(request, obj_id): gallery = Gallery.objects.get(pk=obj_id) data =", "settings import six import json try: from haystack.query import SearchQuerySet", "\"\"\" res = Result() idDict = {} objDict = {}", "gallery=gallery, user=request.user, frequency=frequency ) if not created: # it already", "= {} objDict = {} data = {} modelmap =", "accurate count idDict[m.model] = idDict[m.model].exclude(hidden=True) # Remove deleted items before", "ids.append(gallery.id) res.append(gallery.json()) for gallery in personal: res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required", "if orderby == \"created\" else _sortByModified if six.PY2: o.sort(sortfunc) else:", "== \"created\" else _sortByModified if six.PY2: o.sort(sortfunc) else: o.sort(key=functools.cmp_to_key(sortfunc)) return", "continue if gallery.id in ids: continue ids.append(gallery.id) res.append(gallery.json()) for gallery", "not created: # it already existed so 
delete it sub.delete()", "do so, # subject to the following conditions: # #", "deal in # the Software without restriction, including without limitation", "res = Result() res.append(g.json()) res.message = \"Gallery created\" if created", "request.user.frog_prefs.first().clearance) ) g, created = Gallery.objects.get_or_create(title=title) g.security = security g.description", "def search(query, model): \"\"\" Performs a search query and returns", "= {} for obj in objects: lastids[\"last_{}\".format(modelmap[obj.__class__])] = obj.id for", "\"PUT\": return put(request, obj_id) elif request.method == \"DELETE\": return delete(request,", "delete(request, obj_id=None): \"\"\" Removes ImageVideo objects from Gallery \"\"\" data", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR", "DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF", "modelmap = {} # Get all IDs for each model", "visible by the current user POST / Creates a gallery", "bool, Returns more of the same filtered set of images", "frequency=frequency ) if not created: # it already existed so", "this permission notice shall be included in all # copies", "False from frog.models import ( Gallery, Image, Video, Group, GallerySubscription,", "idDict[m.model] = m.model_class().objects.all() if idDict[m.model] is None: continue if tags:", "modelmap[m.model_class()] = m.model if object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_) else: idDict[m.model]", "kwargs.values(): for l in iter(m): o.append(l) o = list(set(o)) sortfunc", "\"There was an anonymous access attempt from {} to {}\".format(", "**objDict) objects = objects[:BATCH_LENGTH] # Find out last ids lastids", "objects: lastids[\"last_{}\".format(modelmap[obj.__class__])] = obj.id for key, value in lastids.items(): request.session[key]", "obj.security != Gallery.PUBLIC: LOGGER.warning( \"There was an anonymous access attempt", "query and returns the object ids \"\"\" query = query.strip()", "of the # Software, and to 
permit persons to whom", "/ Creates a gallery object GET /id Gallery object if", "[] clearance = Gallery.PUBLIC if request.user.is_authenticated: personal = Gallery.objects.filter( security=Gallery.PERSONAL,", "modify, merge, publish, distribute, sublicense, and/or sell copies of the", "current user POST / Creates a gallery object GET /id", "be included in all # copies or substantial portions of", "elif a.created > b.created: return -1 else: return 0 def", ".prefetch_related(\"tags\") .order_by(\"-{}\".format(orderby)) ) objDict[m.model] = list(objDict[m.model]) # combine and sort", "# add to search string searchQuery += item + \"", "hidden items before slicing so we get an accurate count", "except AttributeError: clearance = Gallery.PUBLIC # Staff members should see", "to do so, # subject to the following conditions: #", "ContentType from django.contrib.auth.decorators import login_required from django.conf import settings import", "same filtered set of images based on session range return", "index = 0 if more and lastid != 0: index", "in objects: res.append(i.json()) data[\"count\"] = len(objects) if settings.DEBUG: data[\"queries\"] =", "gallery.security child.save() if guids: items = getObjectsFromGuids(guids) gallery.addItems(items) if move:", "and returns the object ids \"\"\" query = query.strip() LOGGER.debug(query)", "int( data.get(\"security\", request.user.frog_prefs.first().clearance) ) g, created = Gallery.objects.get_or_create(title=title) g.security =", "function for object by created date\"\"\" if a.created < b.created:", "if request.user.is_authenticated: personal = Gallery.objects.filter( security=Gallery.PERSONAL, owner=request.user ) try: clearance", "Staff members should see everything if request.user.is_staff: clearance = Gallery.GUARDED", "= {} data = {} modelmap = {} # Get", "in kwargs.values(): for l in iter(m): o.append(l) o = list(set(o))", "them into a single list\"\"\" o = [] for m", "members should see everything if 
request.user.is_staff: clearance = Gallery.GUARDED objects", "list, Objects filtered \"\"\" res = Result() idDict = {}", "= \"\" o = None for item in bucket: if", "the appropriate function\"\"\" if request.method == \"GET\": return get(request, obj_id)", "isanonymous = request.user.is_anonymous if isanonymous and obj is None: LOGGER.warning(", "t in tags if t] return _filter(request, obj, tags=tags, more=more,", "so, # subject to the following conditions: # # The", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A", "PermissionDenied() if obj and obj.security != Gallery.PERSONAL: if request.user.frog_prefs.first().clearance <", "copy, modify, merge, publish, distribute, sublicense, and/or sell copies of", "pass BATCH_LENGTH = 75 def index(request, obj_id=None): \"\"\"Handles a request", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER #", "idDict[m.model] = idDict[m.model].none() # Remove hidden items before slicing so", "!= Gallery.PUBLIC and request.user.is_anonymous: raise PermissionDenied else: res = Result()", "elif request.method == \"DELETE\": return delete(request, obj_id) def get(request, obj_id=None):", "!= Gallery.PERSONAL: if request.user.frog_prefs.first().clearance < obj.security: raise PermissionDenied() tags =", "= sqs.raw_search(\"{}*\".format(query)).models(model) if not results: results = sqs.raw_search(\"*{}\".format(query)).models(model) if not", "obj ) ) raise PermissionDenied() if isanonymous and obj and", "= getObjectsFromGuids(guids) gallery = Gallery.objects.get(pk=obj_id) LOGGER.info( \"{} removed {} from", "def delete(request, obj_id=None): \"\"\" Removes ImageVideo objects from Gallery \"\"\"", "guids = data.get(\"guids\", \"\").split(\",\") move = data.get(\"from\") security = data.get(\"security\")", "None: continue if tags: for bucket in tags: searchQuery =", "objects from the gallery GET /filter Returns a filtered list", "# Get all ids of filtered objects, this will be", "if not created: # it already existed 
so delete it", "SearchQuerySet() results = sqs.raw_search(\"{}*\".format(query)).models(model) if not results: results = sqs.raw_search(\"*{}\".format(query)).models(model)", "child.security = gallery.security child.save() if guids: items = getObjectsFromGuids(guids) gallery.addItems(items)", "Adds image or video objects to the gallery DELETE /id", "all tags have been filtered, filter by search searchIDs =", "Gallery.PERSONAL: if request.user.frog_prefs.first().clearance < obj.security: raise PermissionDenied() tags = json.loads(request.GET.get(\"filters\",", "g.description = description g.owner = request.user g.save() res = Result()", "item + \" \" if not HAYSTACK: if not o:", "(ImportError, ImproperlyConfigured): HAYSTACK = False from frog.models import ( Gallery,", "elif a.modified > b.modified: return -1 else: return 0 def", "t] return _filter(request, obj, tags=tags, more=more, orderby=orderby) def _filter(request, object_,", "correct security level if security is not None: gallery.security =", "all # copies or substantial portions of the Software. #", "% Gallery.objects.all().count() data = json.loads(request.body)[\"body\"] title = data.get(\"title\", defaultname) description", "an anonymous access attempt from {} to {}\".format( getClientIP(request), obj", "range :param tags: List of tag IDs to filter :type", "= Gallery.objects.get(pk=obj_id) data = json.loads(request.body)[\"body\"] frequency = data.get(\"frequency\", GallerySubscription.WEEKLY) sub,", "################################################################################################## # Copyright (c) 2012 <NAME> # # Permission is", "six.integer_types): # filter by tag if not o: o =", "# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT", "= sqs.raw_search(\"*{}\".format(query)).models(model) if not results: results = sqs.raw_search(\"*{}*\".format(query)).models(model) return [o.pk", "= Q() o |= Q(tags__id=item) else: # add to search", "query = query.strip() LOGGER.debug(query) sqs = SearchQuerySet() results = sqs.raw_search(\"{}*\".format(query)).models(model)", "including without limitation the rights to use, # copy, modify,", "= None else: obj = Gallery.objects.get(pk=obj_id) isanonymous = request.user.is_anonymous if", "res = Result() return JsonResponse(res.asDict()) @login_required def filterObjects(request, obj_id): \"\"\"", "if t] return _filter(request, obj, tags=tags, more=more, orderby=orderby) def _filter(request,", "objects for i in objects: res.append(i.json()) data[\"count\"] = len(objects) if", "clearance = Gallery.PUBLIC # Staff members should see everything if", "\"\" o = None for item in bucket: if item", "the gallery DELETE /id Removes image or video objects from", "or video objects to the gallery DELETE /id Removes image", "more = json.loads(request.GET.get(\"more\", \"false\")) orderby = request.GET.get( \"orderby\", request.user.frog_prefs.get().json()[\"orderby\"] )", "the Software without restriction, including without limitation the rights to", "LOGGER = logging.getLogger(\"frog\") try: QUERY_MODELS = [ _ for _", "Remove deleted items before slicing so we get an accurate", "security level if security is not None: gallery.security = json.loads(security)", "None for item in bucket: if item == 0: #", "lastids[\"last_{}\".format(modelmap[obj.__class__])] = obj.id for key, value in lastids.items(): request.session[key] =", "Gallery.objects.get(pk=obj_id) # Set the security first so subsequent securityChecks will", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "res = Result() res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def delete(request, obj_id=None):", "= security g.description = description g.owner = request.user g.save() 
res", "m in QUERY_MODELS: modelmap[m.model_class()] = m.model if object_: idDict[m.model] =", "obj.security != Gallery.PERSONAL: if request.user.frog_prefs.first().clearance < obj.security: raise PermissionDenied() tags", "= [] clearance = Gallery.PUBLIC if request.user.is_authenticated: personal = Gallery.objects.filter(", "data = json.loads(request.body) guids = data.get(\"guids\").split(\",\") items = getObjectsFromGuids(guids) gallery", "_ in ContentType.objects.filter(app_label=\"frog\") if issubclass(_.model_class(), Piece) ] except ProgrammingError: pass", "OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT,", "import ProgrammingError from django.template.loader import render_to_string from django.views.decorators.http import require_POST", "o: o = Q() # use a basic search o", "or substantial portions of the Software. # # THE SOFTWARE", "accurate count idDict[m.model] = idDict[m.model].exclude(deleted=True) # Get all ids of", "filter by search searchIDs = search(searchQuery, m.model_class()) if searchIDs: if", "obj_id) elif request.method == \"POST\": return post(request) elif request.method ==", "= Gallery.objects.get(pk=obj_id) if obj.security != Gallery.PUBLIC and request.user.is_anonymous: raise PermissionDenied", "and combines them into a single list\"\"\" o = []", "if created else \"\" return JsonResponse(res.asDict()) @login_required def put(request, obj_id=None):", "security first so subsequent securityChecks will get the correct security", "request.user.frog_prefs.first().clearance < obj.security: raise PermissionDenied() tags = json.loads(request.GET.get(\"filters\", \"[[]]\")) more", "= Gallery.objects.filter(security__lte=clearance) ids = [] for gallery in objects: if", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "tags: searchQuery = \"\" o = None for item in", "from django.contrib.auth.decorators import login_required from django.conf import settings import six", "combine and sort all objects by date objects = 
_sortObjects(orderby,", "def _sortObjects(orderby=\"created\", **kwargs): \"\"\"Sorts lists of objects and combines them", "for _ in ContentType.objects.filter(app_label=\"frog\") if issubclass(_.model_class(), Piece) ] except ProgrammingError:", "= int( data.get(\"security\", request.user.frog_prefs.first().clearance) ) g, created = Gallery.objects.get_or_create(title=title) g.security", "= connection.queries res.value = data return JsonResponse(res.asDict()) def _sortObjects(orderby=\"created\", **kwargs):", "from the gallery GET /filter Returns a filtered list of", "res.append(gallery.json()) for gallery in personal: res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def", "object GET /id Gallery object if visible by the current", "count idDict[m.model] = idDict[m.model].exclude(deleted=True) # Get all ids of filtered", "date\"\"\" if a.modified < b.modified: return 1 elif a.modified >", "to permit persons to whom the Software is furnished to", "= data.get(\"description\", \"\") security = int( data.get(\"security\", request.user.frog_prefs.first().clearance) ) g,", "gallery GET /filter Returns a filtered list of image and", "from Gallery \"\"\" data = json.loads(request.body) guids = data.get(\"guids\").split(\",\") items", "PermissionDenied from django.db.models import Q, Count from django.db import connection", "# copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "AttributeError: clearance = Gallery.PUBLIC # Staff members should see everything", "lastid = request.session.get(\"last_{}\".format(m.model), 0) if not idDict[m.model]: continue if not", "object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_) else: idDict[m.model] = m.model_class().objects.all() if idDict[m.model]", "securityChecks will get the correct security level if security is", "= 0 if more and lastid != 0: index +=", "return list, Objects filtered \"\"\" res = Result() idDict =", "request.user.is_anonymous: raise 
PermissionDenied else: res = Result() personal = []", "\"\"\" Filters Gallery for the requested ImageVideo objects. Returns a", "index + BATCH_LENGTH] # perform the main query to retrieve", "continue ids.append(gallery.id) res.append(gallery.json()) for gallery in personal: res.append(gallery.json()) return JsonResponse(res.asDict())", ":param tags: List of tag IDs to filter :type tags:", "o = Q() # use a basic search o |=", "the following conditions: # # The above copyright notice and", "= Gallery.objects.get(pk=obj_id) LOGGER.info( \"{} removed {} from {}\".format(request.user.email, guids, gallery)", "\"[[]]\")) more = json.loads(request.GET.get(\"more\", \"false\")) orderby = request.GET.get( \"orderby\", request.user.frog_prefs.get().json()[\"orderby\"]", "= Result() return JsonResponse(res.asDict()) @login_required def filterObjects(request, obj_id): \"\"\" Filters", "isinstance(item, six.integer_types): # filter by tag if not o: o", "################################################################################################## \"\"\" Gallery API :: GET / Lists the galleries", "Result() idDict = {} objDict = {} data = {}", "idDict[m.model] = m.model_class().objects.filter(gallery=object_) else: idDict[m.model] = m.model_class().objects.all() if idDict[m.model] is", "obtaining a copy of # this software and associated documentation", "main query to retrieve the objects we want objDict[m.model] =", "\"\").split(\",\") move = data.get(\"from\") security = data.get(\"security\") gallery = Gallery.objects.get(pk=obj_id)", "if int(obj_id) == 0: obj = None else: obj =", "ContentType.objects.filter(app_label=\"frog\") if issubclass(_.model_class(), Piece) ] except ProgrammingError: pass BATCH_LENGTH =", "import require_POST from django.contrib.contenttypes.models import ContentType from django.contrib.auth.decorators import login_required", "and associated documentation files (the \"Software\"), to deal in #", "Create a Gallery \"\"\" defaultname = \"New Gallery 
%i\" %", "\"\"\" defaultname = \"New Gallery %i\" % Gallery.objects.all().count() data =", "OF OR IN CONNECTION # WITH THE SOFTWARE OR THE", "l in iter(m): o.append(l) o = list(set(o)) sortfunc = _sortByCreated", "objects \"\"\" import time import functools import logging import requests", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "= idDict[m.model].exclude(deleted=True) # Get all ids of filtered objects, this", "else: idDict[m.model] = idDict[m.model].none() # Remove hidden items before slicing", "Piece, ) from frog.common import Result, getObjectsFromGuids, getClientIP LOGGER =", "to Gallery based on GUIDs \"\"\" data = json.loads(request.body)[\"body\"] guids", "= data.get(\"guids\", \"\").split(\",\") move = data.get(\"from\") security = data.get(\"security\") gallery", "= query.strip() LOGGER.debug(query) sqs = SearchQuerySet() results = sqs.raw_search(\"{}*\".format(query)).models(model) if", "a filtered list of image and video objects \"\"\" import", "/id Gallery object if visible by the current user PUT", "in lastids.items(): request.session[key] = value # serialize objects for i", "= json.loads(request.body) guids = data.get(\"guids\").split(\",\") items = getObjectsFromGuids(guids) gallery =", "filtered objects, this will be a very fast query idDict[m.model]", "res = Result() idDict = {} objDict = {} data", "(the \"Software\"), to deal in # the Software without restriction,", "search(searchQuery, m.model_class()) if searchIDs: if not o: o = Q()", "json.loads(request.GET.get(\"more\", \"false\")) orderby = request.GET.get( \"orderby\", request.user.frog_prefs.get().json()[\"orderby\"] ) tags =", "= gallery.security child.save() if guids: items = getObjectsFromGuids(guids) gallery.addItems(items) if", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "django.contrib.contenttypes.models import ContentType from django.contrib.auth.decorators import login_required from django.conf import", "= getObjectsFromGuids(guids) 
gallery.addItems(items) if move: fromgallery = Gallery.objects.get(pk=move) fromgallery.removeItems(items) res", "= obj.id for key, value in lastids.items(): request.session[key] = value", "_sortObjects(orderby, **objDict) objects = objects[:BATCH_LENGTH] # Find out last ids", "_ for _ in ContentType.objects.filter(app_label=\"frog\") if issubclass(_.model_class(), Piece) ] except", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "list(objDict[m.model]) # combine and sort all objects by date objects", "# perform the main query to retrieve the objects we", "# Find out last ids lastids = {} for obj", "_sortByCreated(a, b): \"\"\"Sort function for object by created date\"\"\" if", "and Video objects to Gallery based on GUIDs \"\"\" data", "-1 else: return 0 def _sortByModified(a, b): \"\"\"Sort function for", "idDict[m.model].exclude(hidden=True) # Remove deleted items before slicing so we get", "import JsonResponse from django.core.exceptions import ImproperlyConfigured, PermissionDenied from django.db.models import", "@login_required def post(request): \"\"\" Create a Gallery \"\"\" defaultname =", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "HAYSTACK and searchQuery != \"\": # once all tags have", "raise PermissionDenied else: res = Result() personal = [] clearance", "defaultname) description = data.get(\"description\", \"\") security = int( data.get(\"security\", request.user.frog_prefs.first().clearance)", "results] @require_POST @login_required def subscribe(request, obj_id): gallery = Gallery.objects.get(pk=obj_id) data", "= ( objDict[m.model] .select_related(\"author\") .prefetch_related(\"tags\") .order_by(\"-{}\".format(orderby)) ) objDict[m.model] = list(objDict[m.model])", "Q(title__icontains=item) if HAYSTACK and searchQuery != \"\": # once all", "django.conf import settings import six import json try: from haystack.query", "lastid != 0: index += 1 idDict[m.model] = idDict[m.model][index :", "by tag if not o: o = Q() o 
|=", "security g.description = description g.owner = request.user g.save() res =", "to deal in # the Software without restriction, including without", "for t in tags if t] return _filter(request, obj, tags=tags,", "0 if more and lastid != 0: index += 1", "for l in iter(m): o.append(l) o = list(set(o)) sortfunc =", "import logging import requests from django.core.mail import mail_managers from django.http", "is not None: gallery.security = json.loads(security) gallery.save() for child in", "<NAME> # # Permission is hereby granted, free of charge,", ":: GET / Lists the galleries currently visible by the", "the # Software, and to permit persons to whom the", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "isanonymous and obj is None: LOGGER.warning( \"There was an anonymous", "settings.DEBUG: data[\"queries\"] = connection.queries res.value = data return JsonResponse(res.asDict()) def", "data.get(\"guids\", \"\").split(\",\") move = data.get(\"from\") security = data.get(\"security\") gallery =", "ImproperlyConfigured): HAYSTACK = False from frog.models import ( Gallery, Image,", "GallerySubscription, SiteConfig, Piece, ) from frog.common import Result, getObjectsFromGuids, getClientIP", "if not o: o = Q() # use a basic", "PUT /id Adds image or video objects to the gallery", "b.created: return 1 elif a.created > b.created: return -1 else:", "gallery.addItems(items) if move: fromgallery = Gallery.objects.get(pk=move) fromgallery.removeItems(items) res = Result()", "else: return 0 def search(query, model): \"\"\" Performs a search", "DELETE /id Removes image or video objects from the gallery", "if request.user.frog_prefs.first().clearance < obj.security: raise PermissionDenied() tags = json.loads(request.GET.get(\"filters\", \"[[]]\"))", "logging import requests from django.core.mail import mail_managers from django.http import", "child in gallery.gallery_set.all(): child.security = gallery.security child.save() if guids: items", "Video objects to Gallery based 
on GUIDs \"\"\" data =", "of the same filtered set of images based on session", "data = json.loads(request.body)[\"body\"] title = data.get(\"title\", defaultname) description = data.get(\"description\",", "security=Gallery.PERSONAL, owner=request.user ) try: clearance = request.user.frog_prefs.first().clearance except AttributeError: clearance", "mail_managers from django.http import JsonResponse from django.core.exceptions import ImproperlyConfigured, PermissionDenied", "in ContentType.objects.filter(app_label=\"frog\") if issubclass(_.model_class(), Piece) ] except ProgrammingError: pass BATCH_LENGTH", "idDict[m.model][0] try: index = idDict[m.model].index(lastid) except ValueError: index = 0", "FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "continue if tags: for bucket in tags: searchQuery = \"\"", "above copyright notice and this permission notice shall be included", "item in bucket: if item == 0: # filter by", "the correct security level if security is not None: gallery.security", "Gallery API :: GET / Lists the galleries currently visible", "+ \" \" if not HAYSTACK: if not o: o", "Remove hidden items before slicing so we get an accurate", "video objects to the gallery DELETE /id Removes image or", "ProgrammingError from django.template.loader import render_to_string from django.views.decorators.http import require_POST from", "\"\"\" data = json.loads(request.body) guids = data.get(\"guids\").split(\",\") items = getObjectsFromGuids(guids)", "else: o.sort(key=functools.cmp_to_key(sortfunc)) return o def _sortByCreated(a, b): \"\"\"Sort function for", "Returns more of the same filtered set of images based", "more=more, orderby=orderby) def _filter(request, object_, tags=None, more=False, orderby=\"created\"): \"\"\"Filters Piece", "# Software, and to permit persons to whom the Software", "JsonResponse(res.asDict()) @login_required def filterObjects(request, obj_id): \"\"\" Filters Gallery for the", "filtered \"\"\" res = Result() idDict = {} 
objDict =", "= Gallery.PUBLIC # Staff members should see everything if request.user.is_staff:", "sub, created = GallerySubscription.objects.get_or_create( gallery=gallery, user=request.user, frequency=frequency ) if not", "Find out last ids lastids = {} for obj in", "SOFTWARE. ################################################################################################## \"\"\" Gallery API :: GET / Lists the", "from {} to {}\".format( getClientIP(request), obj ) ) raise PermissionDenied()", "o.append(l) o = list(set(o)) sortfunc = _sortByCreated if orderby ==", "getClientIP LOGGER = logging.getLogger(\"frog\") try: QUERY_MODELS = [ _ for", "slicing so we get an accurate count idDict[m.model] = idDict[m.model].exclude(deleted=True)", "logging.getLogger(\"frog\") try: QUERY_MODELS = [ _ for _ in ContentType.objects.filter(app_label=\"frog\")", "the requested ImageVideo objects. Returns a Result object with serialized", "to the following conditions: # # The above copyright notice", "security = int( data.get(\"security\", request.user.frog_prefs.first().clearance) ) g, created = Gallery.objects.get_or_create(title=title)", "subscribe(request, obj_id): gallery = Gallery.objects.get(pk=obj_id) data = json.loads(request.body)[\"body\"] frequency =", "= [t for t in tags if t] return _filter(request,", "objects[:BATCH_LENGTH] # Find out last ids lastids = {} for", "{} objDict = {} data = {} modelmap = {}", "if issubclass(_.model_class(), Piece) ] except ProgrammingError: pass BATCH_LENGTH = 75", "if gallery.security == Gallery.PERSONAL: continue if gallery.id in ids: continue", "= m.model_class().objects.filter(gallery=object_) else: idDict[m.model] = m.model_class().objects.all() if idDict[m.model] is None:", "return 0 def search(query, model): \"\"\" Performs a search query", "LOGGER.info( \"{} removed {} from {}\".format(request.user.email, guids, gallery) ) gallery.removeItems(items)", "except ProgrammingError: pass BATCH_LENGTH = 75 def index(request, obj_id=None): 
\"\"\"Handles", "description g.owner = request.user g.save() res = Result() res.append(g.json()) res.message", "request.session[key] = value # serialize objects for i in objects:", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH", "returns the object ids \"\"\" query = query.strip() LOGGER.debug(query) sqs", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. ################################################################################################## \"\"\"", "EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE", "into a single list\"\"\" o = [] for m in", "with serialized objects \"\"\" if int(obj_id) == 0: obj =", "THE SOFTWARE. ################################################################################################## \"\"\" Gallery API :: GET / Lists", "the rights to use, # copy, modify, merge, publish, distribute,", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "Lists the galleries currently visible by the current user POST", "else: res = Result() personal = [] clearance = Gallery.PUBLIC", "Gallery.objects.get_or_create(title=title) g.security = security g.description = description g.owner = request.user", "objects: res.append(i.json()) data[\"count\"] = len(objects) if settings.DEBUG: data[\"queries\"] = connection.queries", "# it already existed so delete it sub.delete() return JsonResponse(Result().asDict())", "obj.security: raise PermissionDenied() tags = json.loads(request.GET.get(\"filters\", \"[[]]\")) more = json.loads(request.GET.get(\"more\",", "a.modified < b.modified: return 1 elif a.modified > b.modified: return", "JsonResponse from django.core.exceptions import ImproperlyConfigured, PermissionDenied from django.db.models import Q,", "@login_required def subscribe(request, obj_id): gallery = Gallery.objects.get(pk=obj_id) data = json.loads(request.body)[\"body\"]", "django.views.decorators.http import require_POST from django.contrib.contenttypes.models import ContentType from django.contrib.auth.decorators 
import", "return get(request, obj_id) elif request.method == \"POST\": return post(request) elif", "is None: continue if tags: for bucket in tags: searchQuery", "last ids lastids = {} for obj in objects: lastids[\"last_{}\".format(modelmap[obj.__class__])]", "objects to the gallery DELETE /id Removes image or video", "granted, free of charge, to any person obtaining a copy", "import ContentType from django.contrib.auth.decorators import login_required from django.conf import settings", "break elif isinstance(item, six.integer_types): # filter by tag if not", "== 0: # filter by tagless idDict[m.model].annotate(num_tags=Count(\"tags\")) if not o:", "# the Software without restriction, including without limitation the rights", "= [ _ for _ in ContentType.objects.filter(app_label=\"frog\") if issubclass(_.model_class(), Piece)", "render_to_string from django.views.decorators.http import require_POST from django.contrib.contenttypes.models import ContentType from", "index = idDict[m.model].index(lastid) except ValueError: index = 0 if more", "if not more: lastid = idDict[m.model][0] try: index = idDict[m.model].index(lastid)", "AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR #", "fromgallery = Gallery.objects.get(pk=move) fromgallery.removeItems(items) res = Result() res.append(gallery.json()) return JsonResponse(res.asDict())", "\"\"\" Adds Image and Video objects to Gallery based on", "for key, value in lastids.items(): request.session[key] = value # serialize", ") g, created = Gallery.objects.get_or_create(title=title) g.security = security g.description =", "o = list(set(o)) sortfunc = _sortByCreated if orderby == \"created\"", "Get all ids of filtered objects, this will be a", "idDict[m.model].none() # Remove hidden items before slicing so we get", "fast query idDict[m.model] = list( idDict[m.model] .order_by(\"-{}\".format(orderby)) .values_list(\"id\", flat=True) )", "import six import json try: from haystack.query import SearchQuerySet HAYSTACK", "searchQuery = \"\" o = None for item in bucket:", "limitation the rights to use, # copy, modify, merge, publish,", "= request.user.frog_prefs.first().clearance except AttributeError: clearance = Gallery.PUBLIC # Staff members", "not idDict[m.model]: continue if not more: lastid = idDict[m.model][0] try:", "res.value = data return JsonResponse(res.asDict()) def _sortObjects(orderby=\"created\", **kwargs): \"\"\"Sorts lists", "def get(request, obj_id=None): if obj_id: obj = Gallery.objects.get(pk=obj_id) if obj.security", "gallery = Gallery.objects.get(pk=obj_id) data = json.loads(request.body)[\"body\"] frequency = data.get(\"frequency\", GallerySubscription.WEEKLY)", "continue if not more: lastid = idDict[m.model][0] try: index =", "searchQuery += item + \" \" if not HAYSTACK: if", "res.append(i.json()) data[\"count\"] = len(objects) if settings.DEBUG: data[\"queries\"] = connection.queries res.value", "currently visible by the current user POST / Creates a", "= data.get(\"security\") gallery = Gallery.objects.get(pk=obj_id) # Set the security first", "session range return list, Objects filtered \"\"\" res = Result()", "= data.get(\"frequency\", 
GallerySubscription.WEEKLY) sub, created = GallerySubscription.objects.get_or_create( gallery=gallery, user=request.user, frequency=frequency", "json.loads(request.body)[\"body\"] title = data.get(\"title\", defaultname) description = data.get(\"description\", \"\") security", "serialize objects for i in objects: res.append(i.json()) data[\"count\"] = len(objects)", "a Gallery \"\"\" defaultname = \"New Gallery %i\" % Gallery.objects.all().count()", "= logging.getLogger(\"frog\") try: QUERY_MODELS = [ _ for _ in", "requests from django.core.mail import mail_managers from django.http import JsonResponse from", "the objects we want objDict[m.model] = m.model_class().objects.filter( id__in=idDict[m.model] ) objDict[m.model]", "# # The above copyright notice and this permission notice", "gallery in personal: res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def post(request): \"\"\"", "BATCH_LENGTH] # perform the main query to retrieve the objects", "tags: for bucket in tags: searchQuery = \"\" o =", "in QUERY_MODELS: modelmap[m.model_class()] = m.model if object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_)", "Adds Image and Video objects to Gallery based on GUIDs", "IDs for each model for m in QUERY_MODELS: modelmap[m.model_class()] =", "a request based on method and calls the appropriate function\"\"\"", "to {}\".format( getClientIP(request), obj ) ) raise PermissionDenied() if obj", "res.message = \"Gallery created\" if created else \"\" return JsonResponse(res.asDict())", "out last ids lastids = {} for obj in objects:", "objects, this will be a very fast query idDict[m.model] =", "= json.loads(request.GET.get(\"more\", \"false\")) orderby = request.GET.get( \"orderby\", request.user.frog_prefs.get().json()[\"orderby\"] ) tags", "post(request) elif request.method == \"PUT\": return put(request, obj_id) elif request.method", "put(request, obj_id) elif request.method == \"DELETE\": return delete(request, obj_id) def", 
"idDict[m.model]: continue if not more: lastid = idDict[m.model][0] try: index", "objects from Gallery \"\"\" data = json.loads(request.body) guids = data.get(\"guids\").split(\",\")", "getObjectsFromGuids(guids) gallery = Gallery.objects.get(pk=obj_id) LOGGER.info( \"{} removed {} from {}\".format(request.user.email,", "= False from frog.models import ( Gallery, Image, Video, Group,", "the current user POST / Creates a gallery object GET", "string searchQuery += item + \" \" if not HAYSTACK:", "else \"\" return JsonResponse(res.asDict()) @login_required def put(request, obj_id=None): \"\"\" Adds", "not o: o = Q() o |= Q(tags__id=item) else: #", "# Staff members should see everything if request.user.is_staff: clearance =", "Result() res.append(g.json()) res.message = \"Gallery created\" if created else \"\"", "in ids: continue ids.append(gallery.id) res.append(gallery.json()) for gallery in personal: res.append(gallery.json())", "# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "combines them into a single list\"\"\" o = [] for", "\"\"\"Handles a request based on method and calls the appropriate", "ImageVideo objects from Gallery \"\"\" data = json.loads(request.body) guids =", "guids: items = getObjectsFromGuids(guids) gallery.addItems(items) if move: fromgallery = Gallery.objects.get(pk=move)", "for m in QUERY_MODELS: modelmap[m.model_class()] = m.model if object_: idDict[m.model]", "a copy of # this software and associated documentation files", "data.get(\"from\") security = data.get(\"security\") gallery = Gallery.objects.get(pk=obj_id) # Set the", "sublicense, and/or sell copies of the # Software, and to", "personal: res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def post(request): \"\"\" Create a", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "lastids.items(): request.session[key] = value # serialize objects for i in", "OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT", "obj and obj.security != 
Gallery.PUBLIC: LOGGER.warning( \"There was an anonymous", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "\"\"\" query = query.strip() LOGGER.debug(query) sqs = SearchQuerySet() results =", "return JsonResponse(res.asDict()) def _sortObjects(orderby=\"created\", **kwargs): \"\"\"Sorts lists of objects and", "**kwargs): \"\"\"Sorts lists of objects and combines them into a", "# Copyright (c) 2012 <NAME> # # Permission is hereby", "django.db import connection from django.db.utils import ProgrammingError from django.template.loader import", "value in lastids.items(): request.session[key] = value # serialize objects for", "m.model_class().objects.filter( id__in=idDict[m.model] ) objDict[m.model] = ( objDict[m.model] .select_related(\"author\") .prefetch_related(\"tags\") .order_by(\"-{}\".format(orderby))", "json.loads(request.GET.get(\"filters\", \"[[]]\")) more = json.loads(request.GET.get(\"more\", \"false\")) orderby = request.GET.get( \"orderby\",", "a search query and returns the object ids \"\"\" query", "# The above copyright notice and this permission notice shall", "\"\": # once all tags have been filtered, filter by", "lastid = idDict[m.model][0] try: index = idDict[m.model].index(lastid) except ValueError: index", "obj.id for key, value in lastids.items(): request.session[key] = value #", "in personal: res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def post(request): \"\"\" Create", "clearance = Gallery.PUBLIC if request.user.is_authenticated: personal = Gallery.objects.filter( security=Gallery.PERSONAL, owner=request.user", "items = getObjectsFromGuids(guids) gallery = Gallery.objects.get(pk=obj_id) LOGGER.info( \"{} removed {}", "if security is not None: gallery.security = json.loads(security) gallery.save() for", "is None: LOGGER.warning( \"There was an anonymous access attempt from", "ProgrammingError: pass BATCH_LENGTH = 75 def index(request, obj_id=None): \"\"\"Handles a", "PermissionDenied() tags = 
json.loads(request.GET.get(\"filters\", \"[[]]\")) more = json.loads(request.GET.get(\"more\", \"false\")) orderby", "\"\"\" Create a Gallery \"\"\" defaultname = \"New Gallery %i\"", "idDict[m.model] = idDict[m.model].exclude(hidden=True) # Remove deleted items before slicing so", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "idDict[m.model].index(lastid) except ValueError: index = 0 if more and lastid", "return 1 elif a.created > b.created: return -1 else: return", "idDict[m.model] .order_by(\"-{}\".format(orderby)) .values_list(\"id\", flat=True) ) lastid = request.session.get(\"last_{}\".format(m.model), 0) if", "/id Removes image or video objects from the gallery GET", "following conditions: # # The above copyright notice and this", "if not o: o = Q() o |= Q(num_tags__lte=1) break", "conditions: # # The above copyright notice and this permission", "tags=tags, more=more, orderby=orderby) def _filter(request, object_, tags=None, more=False, orderby=\"created\"): \"\"\"Filters", "objects = Gallery.objects.filter(security__lte=clearance) ids = [] for gallery in objects:", "QUERY_MODELS = [ _ for _ in ContentType.objects.filter(app_label=\"frog\") if issubclass(_.model_class(),", "gallery object GET /id Gallery object if visible by the", "function\"\"\" if request.method == \"GET\": return get(request, obj_id) elif request.method", "= json.loads(request.GET.get(\"filters\", \"[[]]\")) more = json.loads(request.GET.get(\"more\", \"false\")) orderby = request.GET.get(", "\"GET\": return get(request, obj_id) elif request.method == \"POST\": return post(request)", "be a very fast query idDict[m.model] = list( idDict[m.model] .order_by(\"-{}\".format(orderby))", "%i\" % Gallery.objects.all().count() data = json.loads(request.body)[\"body\"] title = data.get(\"title\", defaultname)", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "|= Q(tags__id=item) else: # add to search string searchQuery +=", ".order_by(\"-{}\".format(orderby)) ) 
objDict[m.model] = list(objDict[m.model]) # combine and sort all", "@login_required def put(request, obj_id=None): \"\"\" Adds Image and Video objects", "def filterObjects(request, obj_id): \"\"\" Filters Gallery for the requested ImageVideo", "for i in objects: res.append(i.json()) data[\"count\"] = len(objects) if settings.DEBUG:", "apply the filters idDict[m.model] = ( idDict[m.model] .annotate(num_tags=Count(\"tags\")) .filter(o) )", "guids = data.get(\"guids\").split(\",\") items = getObjectsFromGuids(guids) gallery = Gallery.objects.get(pk=obj_id) LOGGER.info(", "< b.created: return 1 elif a.created > b.created: return -1", "filterObjects(request, obj_id): \"\"\" Filters Gallery for the requested ImageVideo objects.", "= m.model_class().objects.all() if idDict[m.model] is None: continue if tags: for", "return post(request) elif request.method == \"PUT\": return put(request, obj_id) elif", "o |= Q(id__in=searchIDs) if o: # apply the filters idDict[m.model]", "def _sortByCreated(a, b): \"\"\"Sort function for object by created date\"\"\"", "whom the Software is furnished to do so, # subject", "title = data.get(\"title\", defaultname) description = data.get(\"description\", \"\") security =", "to search string searchQuery += item + \" \" if", "we want objDict[m.model] = m.model_class().objects.filter( id__in=idDict[m.model] ) objDict[m.model] = (", "_sortByCreated if orderby == \"created\" else _sortByModified if six.PY2: o.sort(sortfunc)", "not HAYSTACK: if not o: o = Q() # use", "if move: fromgallery = Gallery.objects.get(pk=move) fromgallery.removeItems(items) res = Result() res.append(gallery.json())", "PermissionDenied else: res = Result() personal = [] clearance =", "fromgallery.removeItems(items) res = Result() res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def delete(request,", "o = Q() o |= Q(tags__id=item) else: # add to", "getObjectsFromGuids, getClientIP LOGGER = logging.getLogger(\"frog\") try: QUERY_MODELS = [ _", "1 
elif a.created > b.created: return -1 else: return 0", "return -1 else: return 0 def _sortByModified(a, b): \"\"\"Sort function", "objDict[m.model] = ( objDict[m.model] .select_related(\"author\") .prefetch_related(\"tags\") .order_by(\"-{}\".format(orderby)) ) objDict[m.model] =", "return delete(request, obj_id) def get(request, obj_id=None): if obj_id: obj =", "# # Permission is hereby granted, free of charge, to", "subject to the following conditions: # # The above copyright", "ValueError: index = 0 if more and lastid != 0:", "distribute, sublicense, and/or sell copies of the # Software, and", "a.modified > b.modified: return -1 else: return 0 def search(query,", ":param more -- bool, Returns more of the same filtered", "from django.views.decorators.http import require_POST from django.contrib.contenttypes.models import ContentType from django.contrib.auth.decorators", "django.core.exceptions import ImproperlyConfigured, PermissionDenied from django.db.models import Q, Count from", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "Gallery.objects.get(pk=move) fromgallery.removeItems(items) res = Result() res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS #", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL", "more -- bool, Returns more of the same filtered set", "idDict[m.model] = list( idDict[m.model] .order_by(\"-{}\".format(orderby)) .values_list(\"id\", flat=True) ) lastid =", "Gallery.objects.get(pk=obj_id) data = json.loads(request.body)[\"body\"] frequency = data.get(\"frequency\", GallerySubscription.WEEKLY) sub, created", "THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY", "\"Software\"), to deal in # the Software without restriction, including", "if settings.DEBUG: data[\"queries\"] = connection.queries res.value = data return JsonResponse(res.asDict())", "lists of objects and combines them into a single list\"\"\"", "request.GET.get( \"orderby\", request.user.frog_prefs.get().json()[\"orderby\"] ) tags = [t for t in", "from haystack.query import SearchQuerySet HAYSTACK = True except (ImportError, ImproperlyConfigured):", "a single list\"\"\" o = [] for m in kwargs.values():", "for obj in objects: lastids[\"last_{}\".format(modelmap[obj.__class__])] = obj.id for key, value", "bucket: if item == 0: # filter by tagless idDict[m.model].annotate(num_tags=Count(\"tags\"))", "1 idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH] # perform", "in tags if t] return _filter(request, obj, tags=tags, more=more, orderby=orderby)", "from django.core.mail import mail_managers from django.http import JsonResponse from django.core.exceptions", "Image, Video, Group, GallerySubscription, SiteConfig, Piece, ) from frog.common import", "Removes image or video objects from the gallery GET /filter", "return JsonResponse(res.asDict()) @login_required def delete(request, obj_id=None): \"\"\" Removes ImageVideo objects", "{}\".format( getClientIP(request), obj ) ) raise PermissionDenied() if obj and", "time import functools import logging import requests from django.core.mail import", "res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def post(request): \"\"\" Create a Gallery", "of tag IDs to filter :type tags: list 
:param more", "SearchQuerySet HAYSTACK = True except (ImportError, ImproperlyConfigured): HAYSTACK = False", "= json.loads(request.body)[\"body\"] title = data.get(\"title\", defaultname) description = data.get(\"description\", \"\")", "!= \"\": # once all tags have been filtered, filter", "and/or sell copies of the # Software, and to permit", "put(request, obj_id=None): \"\"\" Adds Image and Video objects to Gallery", "g.security = security g.description = description g.owner = request.user g.save()", "request.session.get(\"last_{}\".format(m.model), 0) if not idDict[m.model]: continue if not more: lastid", "else _sortByModified if six.PY2: o.sort(sortfunc) else: o.sort(key=functools.cmp_to_key(sortfunc)) return o def", "sell copies of the # Software, and to permit persons", "res = Result() personal = [] clearance = Gallery.PUBLIC if", "|= Q(title__icontains=item) if HAYSTACK and searchQuery != \"\": # once", "{}\".format( getClientIP(request), obj ) ) raise PermissionDenied() if isanonymous and", "0 def _sortByModified(a, b): \"\"\"Sort function for object by modified", "Set the security first so subsequent securityChecks will get the", "django.core.mail import mail_managers from django.http import JsonResponse from django.core.exceptions import", "Copyright (c) 2012 <NAME> # # Permission is hereby granted,", "idDict[m.model] is None: continue if tags: for bucket in tags:", "= json.loads(security) gallery.save() for child in gallery.gallery_set.all(): child.security = gallery.security", "date\"\"\" if a.created < b.created: return 1 elif a.created >", "== \"PUT\": return put(request, obj_id) elif request.method == \"DELETE\": return", "by the current user POST / Creates a gallery object", "( objDict[m.model] .select_related(\"author\") .prefetch_related(\"tags\") .order_by(\"-{}\".format(orderby)) ) objDict[m.model] = list(objDict[m.model]) #", "o in results] @require_POST @login_required def subscribe(request, obj_id): gallery =", "ImageVideo objects. 
Returns a Result object with serialized objects \"\"\"", "so we get an accurate count idDict[m.model] = idDict[m.model].exclude(deleted=True) #", "request.method == \"POST\": return post(request) elif request.method == \"PUT\": return", "\"\"\" Gallery API :: GET / Lists the galleries currently", "return JsonResponse(res.asDict()) @login_required def put(request, obj_id=None): \"\"\" Adds Image and", "video objects from the gallery GET /filter Returns a filtered", "raise PermissionDenied() tags = json.loads(request.GET.get(\"filters\", \"[[]]\")) more = json.loads(request.GET.get(\"more\", \"false\"))", "IN THE SOFTWARE. ################################################################################################## \"\"\" Gallery API :: GET /", "objects = _sortObjects(orderby, **objDict) objects = objects[:BATCH_LENGTH] # Find out", "subsequent securityChecks will get the correct security level if security", "HAYSTACK = True except (ImportError, ImproperlyConfigured): HAYSTACK = False from", "+ BATCH_LENGTH] # perform the main query to retrieve the", "shall be included in all # copies or substantial portions", "if not HAYSTACK: if not o: o = Q() #", "in objects: if gallery.security == Gallery.PERSONAL: continue if gallery.id in", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "Q() o |= Q(id__in=searchIDs) if o: # apply the filters", "substantial portions of the Software. 
# # THE SOFTWARE IS", "notice shall be included in all # copies or substantial", "objects and combines them into a single list\"\"\" o =", "ids = [] for gallery in objects: if gallery.security ==", "not results: results = sqs.raw_search(\"*{}\".format(query)).models(model) if not results: results =", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "image or video objects from the gallery GET /filter Returns", "gallery.id in ids: continue ids.append(gallery.id) res.append(gallery.json()) for gallery in personal:", "if HAYSTACK and searchQuery != \"\": # once all tags", "isanonymous and obj and obj.security != Gallery.PUBLIC: LOGGER.warning( \"There was", "LOGGER.warning( \"There was an anonymous access attempt from {} to", "o def _sortByCreated(a, b): \"\"\"Sort function for object by created", "obj = None else: obj = Gallery.objects.get(pk=obj_id) isanonymous = request.user.is_anonymous", "the galleries currently visible by the current user POST /", "try: from haystack.query import SearchQuerySet HAYSTACK = True except (ImportError,", "very fast query idDict[m.model] = list( idDict[m.model] .order_by(\"-{}\".format(orderby)) .values_list(\"id\", flat=True)", "LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR", "[ _ for _ in ContentType.objects.filter(app_label=\"frog\") if issubclass(_.model_class(), Piece) ]", "get an accurate count idDict[m.model] = idDict[m.model].exclude(deleted=True) # Get all", "if tags: for bucket in tags: searchQuery = \"\" o", "copyright notice and this permission notice shall be included in", "objDict[m.model] = list(objDict[m.model]) # combine and sort all objects by", "0: index += 1 idDict[m.model] = idDict[m.model][index : index +", "= Gallery.objects.get(pk=obj_id) isanonymous = request.user.is_anonymous if isanonymous and obj is", "obj in objects: lastids[\"last_{}\".format(modelmap[obj.__class__])] = obj.id for key, value in", "from django.conf import settings import six import json try: from", "and to permit 
persons to whom the Software is furnished", "{} modelmap = {} # Get all IDs for each", "level if security is not None: gallery.security = json.loads(security) gallery.save()", "ImproperlyConfigured, PermissionDenied from django.db.models import Q, Count from django.db import", "elif request.method == \"POST\": return post(request) elif request.method == \"PUT\":", "{} # Get all IDs for each model for m", "and range :param tags: List of tag IDs to filter", "Gallery.GUARDED objects = Gallery.objects.filter(security__lte=clearance) ids = [] for gallery in", ") lastid = request.session.get(\"last_{}\".format(m.model), 0) if not idDict[m.model]: continue if", "tags=None, more=False, orderby=\"created\"): \"\"\"Filters Piece objects from self based on", "if searchIDs: if not o: o = Q() o |=", "set of images based on session range return list, Objects", "= Gallery.objects.get_or_create(title=title) g.security = security g.description = description g.owner =", "import ( Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece, )", "= idDict[m.model][0] try: index = idDict[m.model].index(lastid) except ValueError: index =", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "import time import functools import logging import requests from django.core.mail", "request.method == \"DELETE\": return delete(request, obj_id) def get(request, obj_id=None): if", "o = None for item in bucket: if item ==", "< b.modified: return 1 elif a.modified > b.modified: return -1", "user=request.user, frequency=frequency ) if not created: # it already existed", "range return list, Objects filtered \"\"\" res = Result() idDict", "objDict[m.model] = m.model_class().objects.filter( id__in=idDict[m.model] ) objDict[m.model] = ( objDict[m.model] .select_related(\"author\")", "list(set(o)) sortfunc = _sortByCreated if orderby == \"created\" else _sortByModified", "use, # copy, modify, merge, publish, 
distribute, sublicense, and/or sell", "query.strip() LOGGER.debug(query) sqs = SearchQuerySet() results = sqs.raw_search(\"{}*\".format(query)).models(model) if not", "Performs a search query and returns the object ids \"\"\"", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "bucket in tags: searchQuery = \"\" o = None for", "ids: continue ids.append(gallery.id) res.append(gallery.json()) for gallery in personal: res.append(gallery.json()) return", "MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "model): \"\"\" Performs a search query and returns the object", "basic search o |= Q(title__icontains=item) if HAYSTACK and searchQuery !=", "is furnished to do so, # subject to the following", "\"{} removed {} from {}\".format(request.user.email, guids, gallery) ) gallery.removeItems(items) res", "Q() o |= Q(tags__id=item) else: # add to search string", "each model for m in QUERY_MODELS: modelmap[m.model_class()] = m.model if", "NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT", "from self based on filters, search, and range :param tags:", "ARISING FROM, OUT OF OR IN CONNECTION # WITH THE", "GallerySubscription.objects.get_or_create( gallery=gallery, user=request.user, frequency=frequency ) if not created: # it", "see everything if request.user.is_staff: clearance = Gallery.GUARDED objects = Gallery.objects.filter(security__lte=clearance)", "guids, gallery) ) gallery.removeItems(items) res = Result() return JsonResponse(res.asDict()) @login_required", "first so subsequent securityChecks will get the correct security level", "an accurate count idDict[m.model] = idDict[m.model].exclude(hidden=True) # Remove deleted items", "[] for m in kwargs.values(): for l in iter(m): o.append(l)", "created date\"\"\" if a.created < b.created: return 1 elif a.created", "restriction, including without limitation the rights to use, # copy,", "IDs to filter :type tags: list :param more -- bool,", "= description g.owner = request.user g.save() res = Result() res.append(g.json())", "for the requested ImageVideo objects. 
Returns a Result object with", "and this permission notice shall be included in all #", "obj is None: LOGGER.warning( \"There was an anonymous access attempt", "WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "Gallery.objects.get(pk=obj_id) isanonymous = request.user.is_anonymous if isanonymous and obj is None:", "Objects filtered \"\"\" res = Result() idDict = {} objDict", "import settings import six import json try: from haystack.query import", "items before slicing so we get an accurate count idDict[m.model]", "json.loads(security) gallery.save() for child in gallery.gallery_set.all(): child.security = gallery.security child.save()", "not more: lastid = idDict[m.model][0] try: index = idDict[m.model].index(lastid) except", "image and video objects \"\"\" import time import functools import", "!= 0: index += 1 idDict[m.model] = idDict[m.model][index : index", "by created date\"\"\" if a.created < b.created: return 1 elif", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "tags if t] return _filter(request, obj, tags=tags, more=more, orderby=orderby) def", "gallery DELETE /id Removes image or video objects from the", "(c) 2012 <NAME> # # Permission is hereby granted, free", "haystack.query import SearchQuerySet HAYSTACK = True except (ImportError, ImproperlyConfigured): HAYSTACK", "to use, # copy, modify, merge, publish, distribute, sublicense, and/or", "for gallery in objects: if gallery.security == Gallery.PERSONAL: continue if", "\"\"\"Filters Piece objects from self based on filters, search, and", "# apply the filters idDict[m.model] = ( idDict[m.model] .annotate(num_tags=Count(\"tags\")) .filter(o)", "= data return JsonResponse(res.asDict()) def _sortObjects(orderby=\"created\", **kwargs): \"\"\"Sorts lists of", "object with serialized objects \"\"\" if int(obj_id) == 0: obj", "m.model_class()) if searchIDs: if not o: o = Q() o", "import Result, getObjectsFromGuids, getClientIP LOGGER = logging.getLogger(\"frog\") try: QUERY_MODELS =", "to 
filter :type tags: list :param more -- bool, Returns", "in iter(m): o.append(l) o = list(set(o)) sortfunc = _sortByCreated if", "# serialize objects for i in objects: res.append(i.json()) data[\"count\"] =", "Group, GallerySubscription, SiteConfig, Piece, ) from frog.common import Result, getObjectsFromGuids,", "m in kwargs.values(): for l in iter(m): o.append(l) o =", "of image and video objects \"\"\" import time import functools", "list of image and video objects \"\"\" import time import", "= Q() o |= Q(num_tags__lte=1) break elif isinstance(item, six.integer_types): #", "anonymous access attempt from {} to {}\".format( getClientIP(request), obj )", "lastids = {} for obj in objects: lastids[\"last_{}\".format(modelmap[obj.__class__])] = obj.id", "Permission is hereby granted, free of charge, to any person", "> b.created: return -1 else: return 0 def _sortByModified(a, b):", "{}\".format(request.user.email, guids, gallery) ) gallery.removeItems(items) res = Result() return JsonResponse(res.asDict())", "of charge, to any person obtaining a copy of #", ") objDict[m.model] = ( objDict[m.model] .select_related(\"author\") .prefetch_related(\"tags\") .order_by(\"-{}\".format(orderby)) ) objDict[m.model]", "g.owner = request.user g.save() res = Result() res.append(g.json()) res.message =", "= [] for gallery in objects: if gallery.security == Gallery.PERSONAL:", "= list(objDict[m.model]) # combine and sort all objects by date", ") raise PermissionDenied() if isanonymous and obj and obj.security !=", "created\" if created else \"\" return JsonResponse(res.asDict()) @login_required def put(request,", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "object by created date\"\"\" if a.created < b.created: return 1", "g.save() res = Result() res.append(g.json()) res.message = \"Gallery created\" if", "if idDict[m.model] is None: continue if tags: for bucket in", "= None for item in bucket: if item == 0:", "search, and range :param tags: List of tag IDs to", 
"gallery) ) gallery.removeItems(items) res = Result() return JsonResponse(res.asDict()) @login_required def", "g, created = Gallery.objects.get_or_create(title=title) g.security = security g.description = description", "based on GUIDs \"\"\" data = json.loads(request.body)[\"body\"] guids = data.get(\"guids\",", "from frog.common import Result, getObjectsFromGuids, getClientIP LOGGER = logging.getLogger(\"frog\") try:", "\"false\")) orderby = request.GET.get( \"orderby\", request.user.frog_prefs.get().json()[\"orderby\"] ) tags = [t", "( Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece, ) from", "= sqs.raw_search(\"*{}*\".format(query)).models(model) return [o.pk for o in results] @require_POST @login_required", "obj_id: obj = Gallery.objects.get(pk=obj_id) if obj.security != Gallery.PUBLIC and request.user.is_anonymous:", "idDict[m.model].annotate(num_tags=Count(\"tags\")) if not o: o = Q() o |= Q(num_tags__lte=1)", "created = GallerySubscription.objects.get_or_create( gallery=gallery, user=request.user, frequency=frequency ) if not created:", "the object ids \"\"\" query = query.strip() LOGGER.debug(query) sqs =", "+= item + \" \" if not HAYSTACK: if not", "\"\"\"Sort function for object by created date\"\"\" if a.created <", "if guids: items = getObjectsFromGuids(guids) gallery.addItems(items) if move: fromgallery =", "BATCH_LENGTH = 75 def index(request, obj_id=None): \"\"\"Handles a request based", "from django.http import JsonResponse from django.core.exceptions import ImproperlyConfigured, PermissionDenied from", "searchIDs = search(searchQuery, m.model_class()) if searchIDs: if not o: o", "Result() res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def delete(request, obj_id=None): \"\"\" Removes", "if object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_) else: idDict[m.model] = m.model_class().objects.all() if", "= idDict[m.model].exclude(hidden=True) # Remove deleted items before slicing 
so we", "gallery in objects: if gallery.security == Gallery.PERSONAL: continue if gallery.id", "\"\") security = int( data.get(\"security\", request.user.frog_prefs.first().clearance) ) g, created =", ") if not created: # it already existed so delete", "orderby=\"created\"): \"\"\"Filters Piece objects from self based on filters, search,", "Software without restriction, including without limitation the rights to use,", "in objects: lastids[\"last_{}\".format(modelmap[obj.__class__])] = obj.id for key, value in lastids.items():", "DEALINGS IN THE SOFTWARE. ################################################################################################## \"\"\" Gallery API :: GET", "elif isinstance(item, six.integer_types): # filter by tag if not o:", "deleted items before slicing so we get an accurate count", "# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "hereby granted, free of charge, to any person obtaining a", "django.http import JsonResponse from django.core.exceptions import ImproperlyConfigured, PermissionDenied from django.db.models", "HAYSTACK: if not o: o = Q() # use a", "sqs = SearchQuerySet() results = sqs.raw_search(\"{}*\".format(query)).models(model) if not results: results", "= Q() o |= Q(id__in=searchIDs) if o: # apply the", "obj_id) def get(request, obj_id=None): if obj_id: obj = Gallery.objects.get(pk=obj_id) if", "from django.contrib.contenttypes.models import ContentType from django.contrib.auth.decorators import login_required from django.conf", "without limitation the rights to use, # copy, modify, merge,", "use a basic search o |= Q(title__icontains=item) if HAYSTACK and", "def _sortByModified(a, b): \"\"\"Sort function for object by modified date\"\"\"", "Gallery.objects.all().count() data = json.loads(request.body)[\"body\"] title = data.get(\"title\", defaultname) description =", "results = sqs.raw_search(\"*{}\".format(query)).models(model) if not results: results = sqs.raw_search(\"*{}*\".format(query)).models(model) 
return", "else: idDict[m.model] = m.model_class().objects.all() if idDict[m.model] is None: continue if", "PermissionDenied() if isanonymous and obj and obj.security != Gallery.PUBLIC: LOGGER.warning(", "obj ) ) raise PermissionDenied() if obj and obj.security !=", "json.loads(request.body) guids = data.get(\"guids\").split(\",\") items = getObjectsFromGuids(guids) gallery = Gallery.objects.get(pk=obj_id)", "{} data = {} modelmap = {} # Get all", "before slicing so we get an accurate count idDict[m.model] =", "= True except (ImportError, ImproperlyConfigured): HAYSTACK = False from frog.models", "if not results: results = sqs.raw_search(\"*{}*\".format(query)).models(model) return [o.pk for o", "|= Q(num_tags__lte=1) break elif isinstance(item, six.integer_types): # filter by tag", "rights to use, # copy, modify, merge, publish, distribute, sublicense,", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "Gallery.PUBLIC # Staff members should see everything if request.user.is_staff: clearance", "GUIDs \"\"\" data = json.loads(request.body)[\"body\"] guids = data.get(\"guids\", \"\").split(\",\") move", "so we get an accurate count idDict[m.model] = idDict[m.model].exclude(hidden=True) #", "not results: results = sqs.raw_search(\"*{}*\".format(query)).models(model) return [o.pk for o in", "copy of # this software and associated documentation files (the", ") ) raise PermissionDenied() if obj and obj.security != Gallery.PERSONAL:", "raise PermissionDenied() if obj and obj.security != Gallery.PERSONAL: if request.user.frog_prefs.first().clearance", "by tagless idDict[m.model].annotate(num_tags=Count(\"tags\")) if not o: o = Q() o", "True except (ImportError, ImproperlyConfigured): HAYSTACK = False from frog.models import", "QUERY_MODELS: modelmap[m.model_class()] = m.model if object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_) else:", "objects by date objects = _sortObjects(orderby, **objDict) objects = objects[:BATCH_LENGTH]", 
"connection.queries res.value = data return JsonResponse(res.asDict()) def _sortObjects(orderby=\"created\", **kwargs): \"\"\"Sorts", "request.method == \"PUT\": return put(request, obj_id) elif request.method == \"DELETE\":", "filter :type tags: list :param more -- bool, Returns more", "objDict = {} data = {} modelmap = {} #", "and obj.security != Gallery.PERSONAL: if request.user.frog_prefs.first().clearance < obj.security: raise PermissionDenied()", "-- bool, Returns more of the same filtered set of", "\"created\" else _sortByModified if six.PY2: o.sort(sortfunc) else: o.sort(key=functools.cmp_to_key(sortfunc)) return o", "a basic search o |= Q(title__icontains=item) if HAYSTACK and searchQuery", "in results] @require_POST @login_required def subscribe(request, obj_id): gallery = Gallery.objects.get(pk=obj_id)", "included in all # copies or substantial portions of the", "orderby == \"created\" else _sortByModified if six.PY2: o.sort(sortfunc) else: o.sort(key=functools.cmp_to_key(sortfunc))", "LOGGER.debug(query) sqs = SearchQuerySet() results = sqs.raw_search(\"{}*\".format(query)).models(model) if not results:", "# Remove deleted items before slicing so we get an", "( idDict[m.model] .annotate(num_tags=Count(\"tags\")) .filter(o) ) else: idDict[m.model] = idDict[m.model].none() #", "objects = objects[:BATCH_LENGTH] # Find out last ids lastids =", "return JsonResponse(res.asDict()) @login_required def post(request): \"\"\" Create a Gallery \"\"\"", "/ Lists the galleries currently visible by the current user", "filter by tagless idDict[m.model].annotate(num_tags=Count(\"tags\")) if not o: o = Q()", "furnished to do so, # subject to the following conditions:", "permission notice shall be included in all # copies or", "personal = Gallery.objects.filter( security=Gallery.PERSONAL, owner=request.user ) try: clearance = request.user.frog_prefs.first().clearance", "slicing so we get an accurate count idDict[m.model] = idDict[m.model].exclude(hidden=True)", "o = Q() o 
|= Q(id__in=searchIDs) if o: # apply", "Gallery.objects.get(pk=obj_id) LOGGER.info( \"{} removed {} from {}\".format(request.user.email, guids, gallery) )", "# subject to the following conditions: # # The above", "six import json try: from haystack.query import SearchQuerySet HAYSTACK =", "all ids of filtered objects, this will be a very", "try: index = idDict[m.model].index(lastid) except ValueError: index = 0 if", "idDict[m.model] .annotate(num_tags=Count(\"tags\")) .filter(o) ) else: idDict[m.model] = idDict[m.model].none() # Remove", "None else: obj = Gallery.objects.get(pk=obj_id) isanonymous = request.user.is_anonymous if isanonymous", "def put(request, obj_id=None): \"\"\" Adds Image and Video objects to", "for m in kwargs.values(): for l in iter(m): o.append(l) o", "b.modified: return 1 elif a.modified > b.modified: return -1 else:", "removed {} from {}\".format(request.user.email, guids, gallery) ) gallery.removeItems(items) res =", "# use a basic search o |= Q(title__icontains=item) if HAYSTACK", "m.model_class().objects.all() if idDict[m.model] is None: continue if tags: for bucket", "0: # filter by tagless idDict[m.model].annotate(num_tags=Count(\"tags\")) if not o: o", "b): \"\"\"Sort function for object by modified date\"\"\" if a.modified", "\"\"\"Sort function for object by modified date\"\"\" if a.modified <", "notice and this permission notice shall be included in all", "objects \"\"\" if int(obj_id) == 0: obj = None else:", "= request.user.is_anonymous if isanonymous and obj is None: LOGGER.warning( \"There", "this will be a very fast query idDict[m.model] = list(", "= data.get(\"guids\").split(\",\") items = getObjectsFromGuids(guids) gallery = Gallery.objects.get(pk=obj_id) LOGGER.info( \"{}", "o: o = Q() o |= Q(num_tags__lte=1) break elif isinstance(item,", "from django.db import connection from django.db.utils import ProgrammingError from django.template.loader", "the main query to retrieve the objects we want objDict[m.model]", "six.PY2: 
o.sort(sortfunc) else: o.sort(key=functools.cmp_to_key(sortfunc)) return o def _sortByCreated(a, b): \"\"\"Sort", "request.user.frog_prefs.get().json()[\"orderby\"] ) tags = [t for t in tags if", "data return JsonResponse(res.asDict()) def _sortObjects(orderby=\"created\", **kwargs): \"\"\"Sorts lists of objects", "= m.model_class().objects.filter( id__in=idDict[m.model] ) objDict[m.model] = ( objDict[m.model] .select_related(\"author\") .prefetch_related(\"tags\")", "Result, getObjectsFromGuids, getClientIP LOGGER = logging.getLogger(\"frog\") try: QUERY_MODELS = [", "data.get(\"security\", request.user.frog_prefs.first().clearance) ) g, created = Gallery.objects.get_or_create(title=title) g.security = security", "a Result object with serialized objects \"\"\" if int(obj_id) ==", "Software is furnished to do so, # subject to the", "results: results = sqs.raw_search(\"*{}*\".format(query)).models(model) return [o.pk for o in results]", "0 def search(query, model): \"\"\" Performs a search query and", "clearance = request.user.frog_prefs.first().clearance except AttributeError: clearance = Gallery.PUBLIC # Staff", "if not o: o = Q() o |= Q(tags__id=item) else:", "= json.loads(request.body)[\"body\"] guids = data.get(\"guids\", \"\").split(\",\") move = data.get(\"from\") security", "= request.GET.get( \"orderby\", request.user.frog_prefs.get().json()[\"orderby\"] ) tags = [t for t", "= 75 def index(request, obj_id=None): \"\"\"Handles a request based on", "WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND", "created else \"\" return JsonResponse(res.asDict()) @login_required def put(request, obj_id=None): \"\"\"", "serialized objects \"\"\" if int(obj_id) == 0: obj = None", "= {} modelmap = {} # Get all IDs for", "for item in bucket: if item == 0: # filter", "o: # apply the filters idDict[m.model] = ( idDict[m.model] .annotate(num_tags=Count(\"tags\"))", "ids lastids = {} for obj in objects: lastids[\"last_{}\".format(modelmap[obj.__class__])] 
=", "obj = Gallery.objects.get(pk=obj_id) if obj.security != Gallery.PUBLIC and request.user.is_anonymous: raise", "else: return 0 def _sortByModified(a, b): \"\"\"Sort function for object", "= data.get(\"from\") security = data.get(\"security\") gallery = Gallery.objects.get(pk=obj_id) # Set", "images based on session range return list, Objects filtered \"\"\"", "tag if not o: o = Q() o |= Q(tags__id=item)", "image or video objects to the gallery DELETE /id Removes", "import SearchQuerySet HAYSTACK = True except (ImportError, ImproperlyConfigured): HAYSTACK =", "\"Gallery created\" if created else \"\" return JsonResponse(res.asDict()) @login_required def", "= search(searchQuery, m.model_class()) if searchIDs: if not o: o =", "json try: from haystack.query import SearchQuerySet HAYSTACK = True except", "obj_id=None): \"\"\" Adds Image and Video objects to Gallery based", "getClientIP(request), obj ) ) raise PermissionDenied() if obj and obj.security", "orderby = request.GET.get( \"orderby\", request.user.frog_prefs.get().json()[\"orderby\"] ) tags = [t for", "the same filtered set of images based on session range", "idDict = {} objDict = {} data = {} modelmap", "API :: GET / Lists the galleries currently visible by", "= ( idDict[m.model] .annotate(num_tags=Count(\"tags\")) .filter(o) ) else: idDict[m.model] = idDict[m.model].none()", "Result() return JsonResponse(res.asDict()) @login_required def filterObjects(request, obj_id): \"\"\" Filters Gallery", "more: lastid = idDict[m.model][0] try: index = idDict[m.model].index(lastid) except ValueError:", "obj and obj.security != Gallery.PERSONAL: if request.user.frog_prefs.first().clearance < obj.security: raise", "for object by modified date\"\"\" if a.modified < b.modified: return", "in # the Software without restriction, including without limitation the", "= Result() idDict = {} objDict = {} data =", "= GallerySubscription.objects.get_or_create( gallery=gallery, user=request.user, frequency=frequency ) if not 
created: #", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "not o: o = Q() # use a basic search", "data.get(\"guids\").split(\",\") items = getObjectsFromGuids(guids) gallery = Gallery.objects.get(pk=obj_id) LOGGER.info( \"{} removed", "\"\"\" if int(obj_id) == 0: obj = None else: obj", "for gallery in personal: res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def post(request):", "tagless idDict[m.model].annotate(num_tags=Count(\"tags\")) if not o: o = Q() o |=", "frog.common import Result, getObjectsFromGuids, getClientIP LOGGER = logging.getLogger(\"frog\") try: QUERY_MODELS", "try: clearance = request.user.frog_prefs.first().clearance except AttributeError: clearance = Gallery.PUBLIC #", "import login_required from django.conf import settings import six import json", "def post(request): \"\"\" Create a Gallery \"\"\" defaultname = \"New", "get an accurate count idDict[m.model] = idDict[m.model].exclude(hidden=True) # Remove deleted", "and lastid != 0: index += 1 idDict[m.model] = idDict[m.model][index", "= idDict[m.model].index(lastid) except ValueError: index = 0 if more and", "o.sort(key=functools.cmp_to_key(sortfunc)) return o def _sortByCreated(a, b): \"\"\"Sort function for object", "from frog.models import ( Gallery, Image, Video, Group, GallerySubscription, SiteConfig,", "= Gallery.objects.get(pk=move) fromgallery.removeItems(items) res = Result() res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required", "calls the appropriate function\"\"\" if request.method == \"GET\": return get(request,", "data.get(\"frequency\", GallerySubscription.WEEKLY) sub, created = GallerySubscription.objects.get_or_create( gallery=gallery, user=request.user, frequency=frequency )", "|= Q(id__in=searchIDs) if o: # apply the filters idDict[m.model] =", "in all # copies or substantial portions of the Software.", "this software and associated documentation files (the \"Software\"), to deal", 
".annotate(num_tags=Count(\"tags\")) .filter(o) ) else: idDict[m.model] = idDict[m.model].none() # Remove hidden", "1 elif a.modified > b.modified: return -1 else: return 0", "= Gallery.objects.filter( security=Gallery.PERSONAL, owner=request.user ) try: clearance = request.user.frog_prefs.first().clearance except", "CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER", "obj_id=None): if obj_id: obj = Gallery.objects.get(pk=obj_id) if obj.security != Gallery.PUBLIC", "ids of filtered objects, this will be a very fast", "sort all objects by date objects = _sortObjects(orderby, **objDict) objects", "GET /id Gallery object if visible by the current user", "object by modified date\"\"\" if a.modified < b.modified: return 1", "current user PUT /id Adds image or video objects to", "for object by created date\"\"\" if a.created < b.created: return", "= [] for m in kwargs.values(): for l in iter(m):", "query to retrieve the objects we want objDict[m.model] = m.model_class().objects.filter(", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "owner=request.user ) try: clearance = request.user.frog_prefs.first().clearance except AttributeError: clearance =", "return _filter(request, obj, tags=tags, more=more, orderby=orderby) def _filter(request, object_, tags=None,", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "on method and calls the appropriate function\"\"\" if request.method ==", "of # this software and associated documentation files (the \"Software\"),", "\"New Gallery %i\" % Gallery.objects.all().count() data = json.loads(request.body)[\"body\"] title =", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "JsonResponse(res.asDict()) def _sortObjects(orderby=\"created\", **kwargs): \"\"\"Sorts lists of objects and combines", "-1 else: return 0 def search(query, model): \"\"\" Performs a", "data = json.loads(request.body)[\"body\"] frequency = data.get(\"frequency\", GallerySubscription.WEEKLY) sub, created =", "> b.modified: 
return -1 else: return 0 def search(query, model):", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "if not idDict[m.model]: continue if not more: lastid = idDict[m.model][0]", "connection from django.db.utils import ProgrammingError from django.template.loader import render_to_string from", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #", "{} from {}\".format(request.user.email, guids, gallery) ) gallery.removeItems(items) res = Result()", "Q() o |= Q(num_tags__lte=1) break elif isinstance(item, six.integer_types): # filter", "was an anonymous access attempt from {} to {}\".format( getClientIP(request),", "appropriate function\"\"\" if request.method == \"GET\": return get(request, obj_id) elif", "associated documentation files (the \"Software\"), to deal in # the", "object_, tags=None, more=False, orderby=\"created\"): \"\"\"Filters Piece objects from self based", "if a.modified < b.modified: return 1 elif a.modified > b.modified:", "== Gallery.PERSONAL: continue if gallery.id in ids: continue ids.append(gallery.id) res.append(gallery.json())", "2012 <NAME> # # Permission is hereby granted, free of", "USE OR OTHER DEALINGS IN THE SOFTWARE. 
################################################################################################## \"\"\" Gallery", "access attempt from {} to {}\".format( getClientIP(request), obj ) )", "flat=True) ) lastid = request.session.get(\"last_{}\".format(m.model), 0) if not idDict[m.model]: continue", "{} to {}\".format( getClientIP(request), obj ) ) raise PermissionDenied() if", "data = json.loads(request.body)[\"body\"] guids = data.get(\"guids\", \"\").split(\",\") move = data.get(\"from\")", "items = getObjectsFromGuids(guids) gallery.addItems(items) if move: fromgallery = Gallery.objects.get(pk=move) fromgallery.removeItems(items)", "Gallery.PUBLIC: LOGGER.warning( \"There was an anonymous access attempt from {}", "def _filter(request, object_, tags=None, more=False, orderby=\"created\"): \"\"\"Filters Piece objects from", "for bucket in tags: searchQuery = \"\" o = None", "= m.model if object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_) else: idDict[m.model] =", "= \"New Gallery %i\" % Gallery.objects.all().count() data = json.loads(request.body)[\"body\"] title", "objDict[m.model] .select_related(\"author\") .prefetch_related(\"tags\") .order_by(\"-{}\".format(orderby)) ) objDict[m.model] = list(objDict[m.model]) # combine", "filter by tag if not o: o = Q() o", "visible by the current user PUT /id Adds image or", "delete(request, obj_id) def get(request, obj_id=None): if obj_id: obj = Gallery.objects.get(pk=obj_id)", "on filters, search, and range :param tags: List of tag", "not o: o = Q() o |= Q(num_tags__lte=1) break elif", "more=False, orderby=\"created\"): \"\"\"Filters Piece objects from self based on filters,", "require_POST from django.contrib.contenttypes.models import ContentType from django.contrib.auth.decorators import login_required from", "based on session range return list, Objects filtered \"\"\" res", "sqs.raw_search(\"{}*\".format(query)).models(model) if not results: results = 
sqs.raw_search(\"*{}\".format(query)).models(model) if not results:", "and searchQuery != \"\": # once all tags have been", "we get an accurate count idDict[m.model] = idDict[m.model].exclude(deleted=True) # Get", "idDict[m.model] = ( idDict[m.model] .annotate(num_tags=Count(\"tags\")) .filter(o) ) else: idDict[m.model] =", "the current user PUT /id Adds image or video objects", "to the gallery DELETE /id Removes image or video objects", "Q(tags__id=item) else: # add to search string searchQuery += item", "a.created > b.created: return -1 else: return 0 def _sortByModified(a,", "Piece objects from self based on filters, search, and range", "[o.pk for o in results] @require_POST @login_required def subscribe(request, obj_id):", "= data.get(\"title\", defaultname) description = data.get(\"description\", \"\") security = int(", "get(request, obj_id=None): if obj_id: obj = Gallery.objects.get(pk=obj_id) if obj.security !=", "modified date\"\"\" if a.modified < b.modified: return 1 elif a.modified", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "= Q() # use a basic search o |= Q(title__icontains=item)", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "objects from self based on filters, search, and range :param", "None: gallery.security = json.loads(security) gallery.save() for child in gallery.gallery_set.all(): child.security", "results = sqs.raw_search(\"*{}*\".format(query)).models(model) return [o.pk for o in results] @require_POST", "if o: # apply the filters idDict[m.model] = ( idDict[m.model]", "if isanonymous and obj and obj.security != Gallery.PUBLIC: LOGGER.warning( \"There", ".select_related(\"author\") .prefetch_related(\"tags\") .order_by(\"-{}\".format(orderby)) ) objDict[m.model] = list(objDict[m.model]) # combine and", "\" if not HAYSTACK: if not o: o = Q()", "copies or substantial portions of the Software. 
# # THE", "[] for gallery in objects: if gallery.security == Gallery.PERSONAL: continue", "# Remove hidden items before slicing so we get an", "if visible by the current user PUT /id Adds image", "/id Adds image or video objects to the gallery DELETE", "FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "sqs.raw_search(\"*{}*\".format(query)).models(model) return [o.pk for o in results] @require_POST @login_required def", "getClientIP(request), obj ) ) raise PermissionDenied() if isanonymous and obj", "# filter by tagless idDict[m.model].annotate(num_tags=Count(\"tags\")) if not o: o =", "= len(objects) if settings.DEBUG: data[\"queries\"] = connection.queries res.value = data", "want objDict[m.model] = m.model_class().objects.filter( id__in=idDict[m.model] ) objDict[m.model] = ( objDict[m.model]", "o = Q() o |= Q(num_tags__lte=1) break elif isinstance(item, six.integer_types):", "\"\"\"Sorts lists of objects and combines them into a single", "get(request, obj_id) elif request.method == \"POST\": return post(request) elif request.method", "else: # add to search string searchQuery += item +", "created = Gallery.objects.get_or_create(title=title) g.security = security g.description = description g.owner", "child.save() if guids: items = getObjectsFromGuids(guids) gallery.addItems(items) if move: fromgallery", "free of charge, to any person obtaining a copy of", "\"\"\" data = json.loads(request.body)[\"body\"] guids = data.get(\"guids\", \"\").split(\",\") move =", "SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "charge, to any person obtaining a copy of # this", "the Software is furnished to do so, # subject to", "gallery = Gallery.objects.get(pk=obj_id) LOGGER.info( \"{} removed {} from {}\".format(request.user.email, guids,", "Software, and to permit persons to whom the Software is", "import json try: from haystack.query import 
SearchQuerySet HAYSTACK = True", "objects: if gallery.security == Gallery.PERSONAL: continue if gallery.id in ids:", "index += 1 idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH]", "THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE", "i in objects: res.append(i.json()) data[\"count\"] = len(objects) if settings.DEBUG: data[\"queries\"]", "user PUT /id Adds image or video objects to the", "results: results = sqs.raw_search(\"*{}\".format(query)).models(model) if not results: results = sqs.raw_search(\"*{}*\".format(query)).models(model)", "get the correct security level if security is not None:", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "have been filtered, filter by search searchIDs = search(searchQuery, m.model_class())", "video objects \"\"\" import time import functools import logging import", "user POST / Creates a gallery object GET /id Gallery", "obj_id): \"\"\" Filters Gallery for the requested ImageVideo objects. Returns", "objects. Returns a Result object with serialized objects \"\"\" if", "return put(request, obj_id) elif request.method == \"DELETE\": return delete(request, obj_id)", "gallery.save() for child in gallery.gallery_set.all(): child.security = gallery.security child.save() if", "Gallery for the requested ImageVideo objects. 
Returns a Result object", "iter(m): o.append(l) o = list(set(o)) sortfunc = _sortByCreated if orderby", "else: obj = Gallery.objects.get(pk=obj_id) isanonymous = request.user.is_anonymous if isanonymous and", "created: # it already existed so delete it sub.delete() return", ") else: idDict[m.model] = idDict[m.model].none() # Remove hidden items before", "if obj_id: obj = Gallery.objects.get(pk=obj_id) if obj.security != Gallery.PUBLIC and", "search o |= Q(title__icontains=item) if HAYSTACK and searchQuery != \"\":", "return 1 elif a.modified > b.modified: return -1 else: return", "obj_id) elif request.method == \"DELETE\": return delete(request, obj_id) def get(request,", "The above copyright notice and this permission notice shall be", "if isanonymous and obj is None: LOGGER.warning( \"There was an", "res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def delete(request, obj_id=None): \"\"\" Removes ImageVideo", "== \"GET\": return get(request, obj_id) elif request.method == \"POST\": return", "# combine and sort all objects by date objects =", "\"orderby\", request.user.frog_prefs.get().json()[\"orderby\"] ) tags = [t for t in tags", "a very fast query idDict[m.model] = list( idDict[m.model] .order_by(\"-{}\".format(orderby)) .values_list(\"id\",", "move = data.get(\"from\") security = data.get(\"security\") gallery = Gallery.objects.get(pk=obj_id) #", "so subsequent securityChecks will get the correct security level if", "without restriction, including without limitation the rights to use, #", "gallery = Gallery.objects.get(pk=obj_id) # Set the security first so subsequent", "once all tags have been filtered, filter by search searchIDs", "= Gallery.PUBLIC if request.user.is_authenticated: personal = Gallery.objects.filter( security=Gallery.PERSONAL, owner=request.user )", "JsonResponse(res.asDict()) @login_required def put(request, obj_id=None): \"\"\" Adds Image and Video", ") raise PermissionDenied() if obj and obj.security != 
Gallery.PERSONAL: if", "by date objects = _sortObjects(orderby, **objDict) objects = objects[:BATCH_LENGTH] #" ]
[ "Embedded file name: pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu import SCMenu from otp.speedchat.SCTerminal", "0, i)) if len(quest.getSCWhereIsText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1,", "len(quest.getSCWhereIsText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1, i)) if len(quest.getSCHowToText(0))", "if len(quest.getSCHowToText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2, i)) i", "2: self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1, i)) if len(quest.getSCHowToText(0)) > 2:", "return for quest in quests: q = quest if q", "class PSpeedChatQuestMenu(SCMenu): __module__ = __name__ def __init__(self): SCMenu.__init__(self) self.accept('localAvatarQuestAdded', self.__questMenuRefresh)", "self.__questAddSCChat(q) return def __questAddSCChat(self, quest): qId = quest.questId qDNA =", "(62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017,", "pirates.pirate.LocalPirate import * from pirates.quest.QuestStatus import * from pirates.quest.QuestDNA import", "def __questMenuRefresh(self, quest, item=None, note=None): self.clearMenu() quests = localAvatar.questStatus.getCurrentQuests() if", "q.isComplete(): self.__questAddSCChat(q) return def __questAddSCChat(self, quest): qId = quest.questId qDNA", "if len(quest.getSCSummaryText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0, i)) if", "quest, item=None, note=None): self.clearMenu() quests = localAvatar.questStatus.getCurrentQuests() if quests is", "in quest.questDNA.getTasks(): if len(quest.getSCSummaryText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0,", "2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2, i)) i = i +", "* from pirates.pirate.LocalPirate import * 
from pirates.quest.QuestStatus import * from", "qInt = qDNA.questInt i = 0 for task in quest.questDNA.getTasks():", "i)) if len(quest.getSCWhereIsText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1, i))", "if quests is None: return for quest in quests: q", "> 2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2, i)) i = i", "self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2, i)) i = i + 1", "from otp.speedchat.SCTerminal import * from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal from pirates.quest.Quest", "not qDNA: return qInt = qDNA.questInt i = 0 for", "None: continue if not q.isComplete(): self.__questAddSCChat(q) return def __questAddSCChat(self, quest):", "i = 0 for task in quest.questDNA.getTasks(): if len(quest.getSCSummaryText(0)) >", "self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1, i)) if len(quest.getSCHowToText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i),", "qDNA: return qInt = qDNA.questInt i = 0 for task", "2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit", "for task in quest.questDNA.getTasks(): if len(quest.getSCSummaryText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt,", "Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)] #", "= localAvatar.questStatus.getCurrentQuests() if quests is None: return for quest in", "import SCMenu from otp.speedchat.SCTerminal import * from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal", "= QuestDB.QuestDict.get(qId) if not qDNA: return qInt = qDNA.questInt i", "len(quest.getSCHowToText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2, i)) i =", "__name__ def __init__(self): SCMenu.__init__(self) self.accept('localAvatarQuestAdded', self.__questMenuRefresh) self.accept('localAvatarQuestUpdate', 
self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh)", "1, i)) if len(quest.getSCHowToText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2,", "* class PSpeedChatQuestMenu(SCMenu): __module__ = __name__ def __init__(self): SCMenu.__init__(self) self.accept('localAvatarQuestAdded',", "[MSC v.1500 32 bit (Intel)] # Embedded file name: pirates.speedchat.PSpeedChatQuestMenu", "is None: return for quest in quests: q = quest", "i)) if len(quest.getSCHowToText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2, i))", "from pirates.quest.QuestStatus import * from pirates.quest.QuestDNA import * class PSpeedChatQuestMenu(SCMenu):", "qId = quest.questId qDNA = QuestDB.QuestDict.get(qId) if not qDNA: return", "<reponame>itsyaboyrocket/pirates # uncompyle6 version 3.2.0 # Python bytecode 2.4 (62061)", "if len(quest.getSCWhereIsText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1, i)) if", "SCMenu.destroy(self) def __questMenuRefresh(self, quest, item=None, note=None): self.clearMenu() quests = localAvatar.questStatus.getCurrentQuests()", "from otp.speedchat.SCMenu import SCMenu from otp.speedchat.SCTerminal import * from otp.speedchat.SCStaticTextTerminal", "# Embedded file name: pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu import SCMenu from", "Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32", "import * from pirates.quest.QuestDNA import * class PSpeedChatQuestMenu(SCMenu): __module__ =", "otp.speedchat.SCTerminal import * from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal from pirates.quest.Quest import", "destroy(self): SCMenu.destroy(self) def __questMenuRefresh(self, quest, item=None, note=None): self.clearMenu() quests =", "v.1500 32 bit (Intel)] # Embedded file name: pirates.speedchat.PSpeedChatQuestMenu from", 
"otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal from pirates.quest.Quest import Quest from pirates.speedchat.PSpeedChatQuestTerminal import", "quest.giverId, 1, i)) if len(quest.getSCHowToText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId,", "self.accept('localAvatarQuestAdded', self.__questMenuRefresh) self.accept('localAvatarQuestUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestComplete', self.__questMenuRefresh) self.accept('localAvatarQuestDeleted', self.__questMenuRefresh)", "Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC", "return def __questAddSCChat(self, quest): qId = quest.questId qDNA = QuestDB.QuestDict.get(qId)", "# uncompyle6 version 3.2.0 # Python bytecode 2.4 (62061) #", "2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0, i)) if len(quest.getSCWhereIsText(0)) > 2:", "self.clearMenu() quests = localAvatar.questStatus.getCurrentQuests() if quests is None: return for", "(Intel)] # Embedded file name: pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu import SCMenu", "uncompyle6 version 3.2.0 # Python bytecode 2.4 (62061) # Decompiled", "self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestComplete', self.__questMenuRefresh) self.accept('localAvatarQuestDeleted', self.__questMenuRefresh) def destroy(self): SCMenu.destroy(self)", "for quest in quests: q = quest if q is", "return qInt = qDNA.questInt i = 0 for task in", "localAvatar.questStatus.getCurrentQuests() if quests is None: return for quest in quests:", "qInt, quest.giverId, 0, i)) if len(quest.getSCWhereIsText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt,", "QuestDB.QuestDict.get(qId) if not qDNA: return qInt = qDNA.questInt i =", "from 
pirates.speedchat.PSpeedChatQuestTerminal import * from pirates.pirate.LocalPirate import * from pirates.quest.QuestStatus", "len(quest.getSCSummaryText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0, i)) if len(quest.getSCWhereIsText(0))", "# Python bytecode 2.4 (62061) # Decompiled from: Python 2.7.14", "def __questAddSCChat(self, quest): qId = quest.questId qDNA = QuestDB.QuestDict.get(qId) if", "__questAddSCChat(self, quest): qId = quest.questId qDNA = QuestDB.QuestDict.get(qId) if not", "qDNA.questInt i = 0 for task in quest.questDNA.getTasks(): if len(quest.getSCSummaryText(0))", "if not q.isComplete(): self.__questAddSCChat(q) return def __questAddSCChat(self, quest): qId =", "0 for task in quest.questDNA.getTasks(): if len(quest.getSCSummaryText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i),", "PSpeedChatQuestMenu(SCMenu): __module__ = __name__ def __init__(self): SCMenu.__init__(self) self.accept('localAvatarQuestAdded', self.__questMenuRefresh) self.accept('localAvatarQuestUpdate',", "pirates.quest.QuestDNA import * class PSpeedChatQuestMenu(SCMenu): __module__ = __name__ def __init__(self):", "20:19:30) [MSC v.1500 32 bit (Intel)] # Embedded file name:", "self.accept('localAvatarQuestUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestComplete', self.__questMenuRefresh) self.accept('localAvatarQuestDeleted', self.__questMenuRefresh) def destroy(self):", "pirates.quest.QuestStatus import * from pirates.quest.QuestDNA import * class PSpeedChatQuestMenu(SCMenu): __module__", "SCMenu.__init__(self) self.accept('localAvatarQuestAdded', self.__questMenuRefresh) self.accept('localAvatarQuestUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestComplete', self.__questMenuRefresh) self.accept('localAvatarQuestDeleted',", "= quest if q is 
None: continue if not q.isComplete():", "quests: q = quest if q is None: continue if", "3.2.0 # Python bytecode 2.4 (62061) # Decompiled from: Python", "name: pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu import SCMenu from otp.speedchat.SCTerminal import *", "self.accept('localAvatarQuestComplete', self.__questMenuRefresh) self.accept('localAvatarQuestDeleted', self.__questMenuRefresh) def destroy(self): SCMenu.destroy(self) def __questMenuRefresh(self, quest,", "self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestComplete', self.__questMenuRefresh) self.accept('localAvatarQuestDeleted', self.__questMenuRefresh) def destroy(self): SCMenu.destroy(self) def", "import * from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal from pirates.quest.Quest import Quest", "= 0 for task in quest.questDNA.getTasks(): if len(quest.getSCSummaryText(0)) > 2:", "= quest.questId qDNA = QuestDB.QuestDict.get(qId) if not qDNA: return qInt", "Python bytecode 2.4 (62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed,", "* from pirates.quest.QuestDNA import * class PSpeedChatQuestMenu(SCMenu): __module__ = __name__", "qDNA = QuestDB.QuestDict.get(qId) if not qDNA: return qInt = qDNA.questInt", "pirates.speedchat.PSpeedChatQuestTerminal import * from pirates.pirate.LocalPirate import * from pirates.quest.QuestStatus import", "> 2: self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1, i)) if len(quest.getSCHowToText(0)) >", "from pirates.quest.QuestDNA import * class PSpeedChatQuestMenu(SCMenu): __module__ = __name__ def", "self.accept('localAvatarQuestDeleted', self.__questMenuRefresh) def destroy(self): SCMenu.destroy(self) def __questMenuRefresh(self, quest, item=None, note=None):", "quest.questDNA.getTasks(): if len(quest.getSCSummaryText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0, i))", "item=None, note=None): 
self.clearMenu() quests = localAvatar.questStatus.getCurrentQuests() if quests is None:", "file name: pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu import SCMenu from otp.speedchat.SCTerminal import", "continue if not q.isComplete(): self.__questAddSCChat(q) return def __questAddSCChat(self, quest): qId", "# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30)", "(v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]", "from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal from pirates.quest.Quest import Quest from pirates.speedchat.PSpeedChatQuestTerminal", "from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500", "version 3.2.0 # Python bytecode 2.4 (62061) # Decompiled from:", "qInt, quest.giverId, 1, i)) if len(quest.getSCHowToText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt,", "import * from pirates.quest.QuestStatus import * from pirates.quest.QuestDNA import *", "quest.giverId, 0, i)) if len(quest.getSCWhereIsText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId,", "__module__ = __name__ def __init__(self): SCMenu.__init__(self) self.accept('localAvatarQuestAdded', self.__questMenuRefresh) self.accept('localAvatarQuestUpdate', self.__questMenuRefresh)", "* from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal from pirates.quest.Quest import Quest from", "in quests: q = quest if q is None: continue", "import * class PSpeedChatQuestMenu(SCMenu): __module__ = __name__ def __init__(self): SCMenu.__init__(self)", "bit (Intel)] # Embedded file name: pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu import", "import SCStaticTextTerminal from pirates.quest.Quest import Quest from pirates.speedchat.PSpeedChatQuestTerminal import *", "self.__questMenuRefresh) self.accept('localAvatarQuestUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate', 
self.__questMenuRefresh) self.accept('localAvatarQuestComplete', self.__questMenuRefresh) self.accept('localAvatarQuestDeleted', self.__questMenuRefresh) def", "__questMenuRefresh(self, quest, item=None, note=None): self.clearMenu() quests = localAvatar.questStatus.getCurrentQuests() if quests", "not q.isComplete(): self.__questAddSCChat(q) return def __questAddSCChat(self, quest): qId = quest.questId", "task in quest.questDNA.getTasks(): if len(quest.getSCSummaryText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId,", "quest if q is None: continue if not q.isComplete(): self.__questAddSCChat(q)", "= qDNA.questInt i = 0 for task in quest.questDNA.getTasks(): if", "* from pirates.quest.QuestStatus import * from pirates.quest.QuestDNA import * class", "self.__questMenuRefresh) def destroy(self): SCMenu.destroy(self) def __questMenuRefresh(self, quest, item=None, note=None): self.clearMenu()", "32 bit (Intel)] # Embedded file name: pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu", "2.4 (62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16", "self.__questMenuRefresh) self.accept('localAvatarQuestComplete', self.__questMenuRefresh) self.accept('localAvatarQuestDeleted', self.__questMenuRefresh) def destroy(self): SCMenu.destroy(self) def __questMenuRefresh(self,", "q = quest if q is None: continue if not", "def destroy(self): SCMenu.destroy(self) def __questMenuRefresh(self, quest, item=None, note=None): self.clearMenu() quests", "import Quest from pirates.speedchat.PSpeedChatQuestTerminal import * from pirates.pirate.LocalPirate import *", "SCStaticTextTerminal from pirates.quest.Quest import Quest from pirates.speedchat.PSpeedChatQuestTerminal import * from", "q is None: continue if not q.isComplete(): self.__questAddSCChat(q) return def", "quest.questId qDNA = QuestDB.QuestDict.get(qId) if not qDNA: return qInt =", "from pirates.quest.Quest import Quest from 
pirates.speedchat.PSpeedChatQuestTerminal import * from pirates.pirate.LocalPirate", "pirates.quest.Quest import Quest from pirates.speedchat.PSpeedChatQuestTerminal import * from pirates.pirate.LocalPirate import", "bytecode 2.4 (62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep", "= __name__ def __init__(self): SCMenu.__init__(self) self.accept('localAvatarQuestAdded', self.__questMenuRefresh) self.accept('localAvatarQuestUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate',", "pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu import SCMenu from otp.speedchat.SCTerminal import * from", "is None: continue if not q.isComplete(): self.__questAddSCChat(q) return def __questAddSCChat(self,", "import * from pirates.pirate.LocalPirate import * from pirates.quest.QuestStatus import *", "if not qDNA: return qInt = qDNA.questInt i = 0", "quests is None: return for quest in quests: q =", "> 2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0, i)) if len(quest.getSCWhereIsText(0)) >", "16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)] # Embedded", "if q is None: continue if not q.isComplete(): self.__questAddSCChat(q) return", "self.__questMenuRefresh) self.accept('localAvatarQuestDeleted', self.__questMenuRefresh) def destroy(self): SCMenu.destroy(self) def __questMenuRefresh(self, quest, item=None,", "note=None): self.clearMenu() quests = localAvatar.questStatus.getCurrentQuests() if quests is None: return", "__init__(self): SCMenu.__init__(self) self.accept('localAvatarQuestAdded', self.__questMenuRefresh) self.accept('localAvatarQuestUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestComplete', self.__questMenuRefresh)", "quest in quests: q = quest if q is None:", "self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0, i)) if len(quest.getSCWhereIsText(0)) > 2: 
self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i),", "None: return for quest in quests: q = quest if", "quest): qId = quest.questId qDNA = QuestDB.QuestDict.get(qId) if not qDNA:", "from pirates.pirate.LocalPirate import * from pirates.quest.QuestStatus import * from pirates.quest.QuestDNA", "Quest from pirates.speedchat.PSpeedChatQuestTerminal import * from pirates.pirate.LocalPirate import * from", "SCMenu from otp.speedchat.SCTerminal import * from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal from", "def __init__(self): SCMenu.__init__(self) self.accept('localAvatarQuestAdded', self.__questMenuRefresh) self.accept('localAvatarQuestUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestComplete',", "quests = localAvatar.questStatus.getCurrentQuests() if quests is None: return for quest", "2017, 20:19:30) [MSC v.1500 32 bit (Intel)] # Embedded file", "otp.speedchat.SCMenu import SCMenu from otp.speedchat.SCTerminal import * from otp.speedchat.SCStaticTextTerminal import" ]
[ "= Config.get_conf(self, identifier=806715409318936616) default_guild = { \"spotifyembedEnabled\": False, } self.config.register_guild(**default_guild)", "return if message.guild is None: return spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled()", "whurl == \"\": newHook = await ctx.channel.create_webhook(name=\"Webhook\") whurl = newHook.url", "embed link Can set asMyself to true/false, for sending as", "def on_message(self, message: discord.Message): if message.author.bot: return if message.webhook_id: return", "@commands.group(aliases=[\"setspembed\", \"setspe\"]) @checks.guildowner_or_permissions() async def setspotifyembed(self, ctx: commands.Context): \"\"\"Set Spotify", "Webhook, AsyncWebhookAdapter import re class Spotifyembed(commands.Cog): \"\"\"Automatically send a reply", "set asMyself to true/false, for sending as webhook\"\"\" spembedSplit =", "spembedMatches: spembedSplit = match.split('.com/') sendMsg += spembedSplit[0] + \".com/embed/\" +", "discord.Message): if message.author.bot: return if message.webhook_id: return if message.guild is", "= { \"spotifyembedEnabled\": False, } self.config.register_guild(**default_guild) @commands.group(aliases=[\"setspembed\", \"setspe\"]) @checks.guildowner_or_permissions() async", "spembedCommandIgnore = r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\" spembedCommands = re.findall(spembedCommandIgnore, message.clean_content) if len(spembedCommands) >", "sendMsg, username=ctx.author.display_name, avatar_url=ctx.author.avatar_url, ) except discord.errors.Forbidden: return await ctx.send(sendMsg) else:", "self.config.guild(message.guild).spotifyembedEnabled() if spotifyembedEnabled is not True: return # Ignore if", "re.findall(spembedFinder, message.clean_content) if len(spembedMatches) <= 0: return sendMsg = \"\"", "for match in spembedMatches: spembedSplit = match.split('.com/') sendMsg += spembedSplit[0]", "await ctx.send(sendMsg) else: return await ctx.send(\"An error 
occurred.\") @commands.Cog.listener() async", "Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await ctx.message.add_reaction(\"✅\") @setspotifyembed.command(name=\"disable\") async def setspembeddisable(self,", "len(spembedMatches) <= 0: return sendMsg = \"\" for match in", "# Make new webhook if one didn't exist if whurl", "if spotifyembedEnabled is not True: return # Ignore if we", "ctx.channel.webhooks() whurl = \"\" # Return if match for wh", "bot self.config = Config.get_conf(self, identifier=806715409318936616) default_guild = { \"spotifyembedEnabled\": False,", "whurl == \"\": newHook = await message.channel.create_webhook(name=\"Webhook\") whurl = newHook.url", "spembedSplit[0] + \".com/embed/\" + spembedSplit[1] if asMyself == False: return", "no spotify links in the trigger message spembedFinder = r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\"", "len(spembedCommands) > 0: return # Ignore if we find no", "import discord from discord import Webhook, AsyncWebhookAdapter import re class", "# Ignore if we find no spotify links in the", "False, } self.config.register_guild(**default_guild) @commands.group(aliases=[\"setspembed\", \"setspe\"]) @checks.guildowner_or_permissions() async def setspotifyembed(self, ctx:", "Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=message.author.display_name, avatar_url=message.author.avatar_url, ) except discord.errors.Forbidden:", "for mobile users who can finally listen to music samples", "bot made try: whooklist = await ctx.channel.webhooks() whurl = \"\"", "from Discord, without needing an account.\"\"\" def __init__(self, bot): self.bot", "= await self.config.guild(message.guild).spotifyembedEnabled() if spotifyembedEnabled is not True: return #", "import asyncio import aiohttp import discord from discord import Webhook,", "without needing an account.\"\"\" def __init__(self, bot): self.bot = bot", 
"@commands.Cog.listener() async def on_message(self, message: discord.Message): if message.author.bot: return if", "self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await ctx.message.add_reaction(\"✅\") @setspotifyembed.command(name=\"disable\") async def setspembeddisable(self, ctx): \"\"\"Disable auto-responding", "\"\"\"Disable auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await ctx.message.add_reaction(\"✅\") @commands.command(aliases=[\"spembed\",", "return await ctx.send(\"An error occurred.\") @commands.Cog.listener() async def on_message(self, message:", "whurl = newHook.url async with aiohttp.ClientSession() as session: webhook =", "message.author.bot: return if message.webhook_id: return if message.guild is None: return", "\"\"\"Automatically send a reply to Spotify links with a link", "link to the embed preview. Convenient for mobile users who", "discord.Embed(color=(await ctx.embed_colour()), title=\"Guild Settings\", description=\"\") e.add_field(name=\"spotifyembedEnabled\", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False) await", "= bot self.config = Config.get_conf(self, identifier=806715409318936616) default_guild = { \"spotifyembedEnabled\":", "spembedCommands = re.findall(spembedCommandIgnore, message.clean_content) if len(spembedCommands) > 0: return #", "Ignore if we find no spotify links in the trigger", "commands, checks import asyncio import aiohttp import discord from discord", "didn't exist if whurl == \"\": newHook = await ctx.channel.create_webhook(name=\"Webhook\")", "spotifyembed(self, ctx, spotifyLink, asMyself: bool=False): \"\"\"Return a Spotify embed link", "account.\"\"\" def __init__(self, bot): self.bot = bot self.config = Config.get_conf(self,", "in whooklist: if self.bot.user == wh.user: whurl = wh.url #", "if we find no spotify links in the trigger message", "redbot.core import Config from redbot.core import Config, commands, 
checks import", "to the embed preview. Convenient for mobile users who can", "message: discord.Message): if message.author.bot: return if message.webhook_id: return if message.guild", "\"setspe\"]) @checks.guildowner_or_permissions() async def setspotifyembed(self, ctx: commands.Context): \"\"\"Set Spotify Embed", "async def setspembeddisable(self, ctx): \"\"\"Disable auto-responding to Spotify links\"\"\" await", "we find no spotify links in the trigger message spembedFinder", "# from redbot.core import Config from redbot.core import Config, commands,", "return spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled() if spotifyembedEnabled is not True:", "checks import asyncio import aiohttp import discord from discord import", "if we find [p]spotifyembed in the trigger message spembedCommandIgnore =", ") except discord.errors.Forbidden: return await ctx.send(sendMsg) else: return await ctx.send(\"An", "we find [p]spotifyembed in the trigger message spembedCommandIgnore = r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\"", "spotifyLink.split('.com/') sendMsg = spembedSplit[0] + \".com/embed/\" + spembedSplit[1] if asMyself", "reply to Spotify links with a link to the embed", "adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=message.author.display_name, avatar_url=message.author.avatar_url, ) except discord.errors.Forbidden: return", "find [p]spotifyembed in the trigger message spembedCommandIgnore = r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\" spembedCommands", "from redbot.core import Config, commands, checks import asyncio import aiohttp", "self.bot.user == wh.user: whurl = wh.url # Make new webhook", "+= spembedSplit[0] + \".com/embed/\" + spembedSplit[1] + \"\\n\" # Find", "\"\"\"Enable auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await ctx.message.add_reaction(\"✅\") @setspotifyembed.command(name=\"disable\")", "try: whooklist = await 
ctx.channel.webhooks() whurl = \"\" # Return", "match in spembedMatches: spembedSplit = match.split('.com/') sendMsg += spembedSplit[0] +", "to true/false, for sending as webhook\"\"\" spembedSplit = spotifyLink.split('.com/') sendMsg", "= await ctx.channel.webhooks() whurl = \"\" # Return if match", "bool=False): \"\"\"Return a Spotify embed link Can set asMyself to", "not ctx.invoked_subcommand: # Guild settings e = discord.Embed(color=(await ctx.embed_colour()), title=\"Guild", "== \"\": newHook = await message.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async", "spotifyLink, asMyself: bool=False): \"\"\"Return a Spotify embed link Can set", "is None: return spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled() if spotifyembedEnabled is", "to Spotify links with a link to the embed preview.", "spembedSplit[0] + \".com/embed/\" + spembedSplit[1] + \"\\n\" # Find a", "+ spembedSplit[1] + \"\\n\" # Find a webhook that the", "@setspotifyembed.command(name=\"disable\") async def setspembeddisable(self, ctx): \"\"\"Disable auto-responding to Spotify links\"\"\"", "webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=message.author.display_name, avatar_url=message.author.avatar_url, )", "@checks.guildowner_or_permissions() async def setspotifyembed(self, ctx: commands.Context): \"\"\"Set Spotify Embed settings\"\"\"", "await ctx.send(\"An error occurred.\") @commands.Cog.listener() async def on_message(self, message: discord.Message):", "settings e = discord.Embed(color=(await ctx.embed_colour()), title=\"Guild Settings\", description=\"\") e.add_field(name=\"spotifyembedEnabled\", value=(await", "> 0: return # Ignore if we find no spotify", "await ctx.channel.webhooks() whurl = \"\" # Return if match for", "Guild settings e = discord.Embed(color=(await ctx.embed_colour()), title=\"Guild Settings\", description=\"\") e.add_field(name=\"spotifyembedEnabled\",", "link 
Can set asMyself to true/false, for sending as webhook\"\"\"", "exist if whurl == \"\": newHook = await ctx.channel.create_webhook(name=\"Webhook\") whurl", "await self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await ctx.message.add_reaction(\"✅\") @setspotifyembed.command(name=\"disable\") async def setspembeddisable(self, ctx): \"\"\"Disable", "Make new webhook if one didn't exist if whurl ==", "e = discord.Embed(color=(await ctx.embed_colour()), title=\"Guild Settings\", description=\"\") e.add_field(name=\"spotifyembedEnabled\", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()),", "= discord.Embed(color=(await ctx.embed_colour()), title=\"Guild Settings\", description=\"\") e.add_field(name=\"spotifyembedEnabled\", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False)", "0: return # Ignore if we find no spotify links", "an account.\"\"\" def __init__(self, bot): self.bot = bot self.config =", "webhook that the bot made try: whooklist = await ctx.channel.webhooks()", "if one didn't exist if whurl == \"\": newHook =", "message spembedCommandIgnore = r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\" spembedCommands = re.findall(spembedCommandIgnore, message.clean_content) if len(spembedCommands)", "+ \".com/embed/\" + spembedSplit[1] if asMyself == False: return await", "if len(spembedMatches) <= 0: return sendMsg = \"\" for match", "a webhook that the bot made try: whooklist = await", "description=\"\") e.add_field(name=\"spotifyembedEnabled\", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False) await ctx.send(embed=e) @setspotifyembed.command(name=\"enable\") async def", "False: return await ctx.send(sendMsg) elif asMyself == True: # Find", "webhook.send( sendMsg, username=message.author.display_name, avatar_url=message.author.avatar_url, ) except discord.errors.Forbidden: return await message.channel.send(sendMsg)", "= await message.channel.create_webhook(name=\"Webhook\") whurl = 
newHook.url async with aiohttp.ClientSession() as", "setspembeddisable(self, ctx): \"\"\"Disable auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await", "aiohttp.ClientSession() as session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg,", "webhook that the bot made try: whooklist = await message.channel.webhooks()", "with a link to the embed preview. Convenient for mobile", "in spembedMatches: spembedSplit = match.split('.com/') sendMsg += spembedSplit[0] + \".com/embed/\"", "return # Ignore if we find [p]spotifyembed in the trigger", "Convenient for mobile users who can finally listen to music", "spotify links in the trigger message spembedFinder = r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\" spembedMatches", "commands.Context): \"\"\"Set Spotify Embed settings\"\"\" if not ctx.invoked_subcommand: # Guild", "discord import Webhook, AsyncWebhookAdapter import re class Spotifyembed(commands.Cog): \"\"\"Automatically send", "sendMsg = spembedSplit[0] + \".com/embed/\" + spembedSplit[1] if asMyself ==", "\"\" # Return if match for wh in whooklist: if", "links in the trigger message spembedFinder = r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\" spembedMatches =", "\"spotifyembedEnabled\": False, } self.config.register_guild(**default_guild) @commands.group(aliases=[\"setspembed\", \"setspe\"]) @checks.guildowner_or_permissions() async def setspotifyembed(self,", "None: return spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled() if spotifyembedEnabled is not", "newHook.url async with aiohttp.ClientSession() as session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session))", "embed preview. 
Convenient for mobile users who can finally listen", "return await ctx.send(sendMsg) else: return await ctx.send(\"An error occurred.\") @commands.Cog.listener()", "await self.config.guild(message.guild).spotifyembedEnabled() if spotifyembedEnabled is not True: return # Ignore", "+ \"\\n\" # Find a webhook that the bot made", "ctx.send(sendMsg) elif asMyself == True: # Find a webhook that", "spembedFinder = r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\" spembedMatches = re.findall(spembedFinder, message.clean_content) if len(spembedMatches) <=", "bot): self.bot = bot self.config = Config.get_conf(self, identifier=806715409318936616) default_guild =", "didn't exist if whurl == \"\": newHook = await message.channel.create_webhook(name=\"Webhook\")", "Spotify links with a link to the embed preview. Convenient", "ctx.send(\"An error occurred.\") @commands.Cog.listener() async def on_message(self, message: discord.Message): if", "Ignore if we find [p]spotifyembed in the trigger message spembedCommandIgnore", "aiohttp import discord from discord import Webhook, AsyncWebhookAdapter import re", "asMyself to true/false, for sending as webhook\"\"\" spembedSplit = spotifyLink.split('.com/')", "message.webhook_id: return if message.guild is None: return spotifyembedEnabled = await", "avatar_url=ctx.author.avatar_url, ) except discord.errors.Forbidden: return await ctx.send(sendMsg) else: return await", "import Config from redbot.core import Config, commands, checks import asyncio", "else: return await ctx.send(\"An error occurred.\") @commands.Cog.listener() async def on_message(self,", "a Spotify embed link Can set asMyself to true/false, for", "self.config = Config.get_conf(self, identifier=806715409318936616) default_guild = { \"spotifyembedEnabled\": False, }", "users who can finally listen to music samples from Discord,", "webhook\"\"\" spembedSplit = spotifyLink.split('.com/') sendMsg = spembedSplit[0] + \".com/embed/\" +", "True: return # 
Ignore if we find [p]spotifyembed in the", "who can finally listen to music samples from Discord, without", "message.clean_content) if len(spembedMatches) <= 0: return sendMsg = \"\" for", "@setspotifyembed.command(name=\"enable\") async def setspembedenable(self, ctx): \"\"\"Enable auto-responding to Spotify links\"\"\"", "a link to the embed preview. Convenient for mobile users", "can finally listen to music samples from Discord, without needing", "Embed settings\"\"\" if not ctx.invoked_subcommand: # Guild settings e =", "= re.findall(spembedCommandIgnore, message.clean_content) if len(spembedCommands) > 0: return # Ignore", "match for wh in whooklist: if self.bot.user == wh.user: whurl", "Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=ctx.author.display_name, avatar_url=ctx.author.avatar_url, ) except discord.errors.Forbidden:", "webhook.send( sendMsg, username=ctx.author.display_name, avatar_url=ctx.author.avatar_url, ) except discord.errors.Forbidden: return await ctx.send(sendMsg)", "ctx.send(embed=e) @setspotifyembed.command(name=\"enable\") async def setspembedenable(self, ctx): \"\"\"Enable auto-responding to Spotify", "newHook = await message.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async with aiohttp.ClientSession()", "made try: whooklist = await message.channel.webhooks() whurl = \"\" #", "# Find a webhook that the bot made try: whooklist", "asyncio import aiohttp import discord from discord import Webhook, AsyncWebhookAdapter", "re class Spotifyembed(commands.Cog): \"\"\"Automatically send a reply to Spotify links", "message.clean_content) if len(spembedCommands) > 0: return # Ignore if we", "Config from redbot.core import Config, commands, checks import asyncio import", "whooklist = await ctx.channel.webhooks() whurl = \"\" # Return if", "finally listen to music samples from Discord, without needing an", "in the trigger message spembedFinder = 
r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\" spembedMatches = re.findall(spembedFinder,", "await message.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async with aiohttp.ClientSession() as session:", "if message.guild is None: return spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled() if", "auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await ctx.message.add_reaction(\"✅\") @setspotifyembed.command(name=\"disable\") async", "def setspembeddisable(self, ctx): \"\"\"Disable auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(False)", "await ctx.message.add_reaction(\"✅\") @setspotifyembed.command(name=\"disable\") async def setspembeddisable(self, ctx): \"\"\"Disable auto-responding to", "setspotifyembed(self, ctx: commands.Context): \"\"\"Set Spotify Embed settings\"\"\" if not ctx.invoked_subcommand:", "True: # Find a webhook that the bot made try:", "settings\"\"\" if not ctx.invoked_subcommand: # Guild settings e = discord.Embed(color=(await", "ctx.message.add_reaction(\"✅\") @setspotifyembed.command(name=\"disable\") async def setspembeddisable(self, ctx): \"\"\"Disable auto-responding to Spotify", "Spotify embed link Can set asMyself to true/false, for sending", "Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await ctx.message.add_reaction(\"✅\") @commands.command(aliases=[\"spembed\", \"spe\"]) async def", "\"\": newHook = await message.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async with", "re.findall(spembedCommandIgnore, message.clean_content) if len(spembedCommands) > 0: return # Ignore if", "e.add_field(name=\"spotifyembedEnabled\", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False) await ctx.send(embed=e) @setspotifyembed.command(name=\"enable\") async def setspembedenable(self,", "find no 
spotify links in the trigger message spembedFinder =", "return await ctx.send(sendMsg) elif asMyself == True: # Find a", "elif asMyself == True: # Find a webhook that the", "async def spotifyembed(self, ctx, spotifyLink, asMyself: bool=False): \"\"\"Return a Spotify", "mobile users who can finally listen to music samples from", "async def setspotifyembed(self, ctx: commands.Context): \"\"\"Set Spotify Embed settings\"\"\" if", "whurl = \"\" # Return if match for wh in", "self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await ctx.message.add_reaction(\"✅\") @commands.command(aliases=[\"spembed\", \"spe\"]) async def spotifyembed(self, ctx, spotifyLink,", "match.split('.com/') sendMsg += spembedSplit[0] + \".com/embed/\" + spembedSplit[1] + \"\\n\"", "ctx): \"\"\"Disable auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await ctx.message.add_reaction(\"✅\")", "setspembedenable(self, ctx): \"\"\"Enable auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await", "try: whooklist = await message.channel.webhooks() whurl = \"\" # Return", "links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await ctx.message.add_reaction(\"✅\") @commands.command(aliases=[\"spembed\", \"spe\"]) async def spotifyembed(self,", "\".com/embed/\" + spembedSplit[1] if asMyself == False: return await ctx.send(sendMsg)", "= newHook.url async with aiohttp.ClientSession() as session: webhook = Webhook.from_url(whurl,", "self.config.register_guild(**default_guild) @commands.group(aliases=[\"setspembed\", \"setspe\"]) @checks.guildowner_or_permissions() async def setspotifyembed(self, ctx: commands.Context): \"\"\"Set", "Find a webhook that the bot made try: whooklist =", "as session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=ctx.author.display_name,", "newHook = await 
ctx.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async with aiohttp.ClientSession()", "whurl = wh.url # Make new webhook if one didn't", "a reply to Spotify links with a link to the", "[p]spotifyembed in the trigger message spembedCommandIgnore = r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\" spembedCommands =", "\"spe\"]) async def spotifyembed(self, ctx, spotifyLink, asMyself: bool=False): \"\"\"Return a", "default_guild = { \"spotifyembedEnabled\": False, } self.config.register_guild(**default_guild) @commands.group(aliases=[\"setspembed\", \"setspe\"]) @checks.guildowner_or_permissions()", "true/false, for sending as webhook\"\"\" spembedSplit = spotifyLink.split('.com/') sendMsg =", "import re class Spotifyembed(commands.Cog): \"\"\"Automatically send a reply to Spotify", "= await message.channel.webhooks() whurl = \"\" # Return if match", "class Spotifyembed(commands.Cog): \"\"\"Automatically send a reply to Spotify links with", "in the trigger message spembedCommandIgnore = r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\" spembedCommands = re.findall(spembedCommandIgnore,", "def setspembedenable(self, ctx): \"\"\"Enable auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(True)", "for wh in whooklist: if self.bot.user == wh.user: whurl =", "title=\"Guild Settings\", description=\"\") e.add_field(name=\"spotifyembedEnabled\", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False) await ctx.send(embed=e) @setspotifyembed.command(name=\"enable\")", "ctx.invoked_subcommand: # Guild settings e = discord.Embed(color=(await ctx.embed_colour()), title=\"Guild Settings\",", "= match.split('.com/') sendMsg += spembedSplit[0] + \".com/embed/\" + spembedSplit[1] +", "exist if whurl == \"\": newHook = await message.channel.create_webhook(name=\"Webhook\") whurl", "Spotify Embed settings\"\"\" if not ctx.invoked_subcommand: # Guild settings e", "the bot made try: whooklist = await 
ctx.channel.webhooks() whurl =", "ctx.embed_colour()), title=\"Guild Settings\", description=\"\") e.add_field(name=\"spotifyembedEnabled\", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False) await ctx.send(embed=e)", "sendMsg = \"\" for match in spembedMatches: spembedSplit = match.split('.com/')", "discord from discord import Webhook, AsyncWebhookAdapter import re class Spotifyembed(commands.Cog):", "await ctx.send(embed=e) @setspotifyembed.command(name=\"enable\") async def setspembedenable(self, ctx): \"\"\"Enable auto-responding to", "auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await ctx.message.add_reaction(\"✅\") @commands.command(aliases=[\"spembed\", \"spe\"])", "async def setspembedenable(self, ctx): \"\"\"Enable auto-responding to Spotify links\"\"\" await", "needing an account.\"\"\" def __init__(self, bot): self.bot = bot self.config", "= r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\" spembedCommands = re.findall(spembedCommandIgnore, message.clean_content) if len(spembedCommands) > 0:", "} self.config.register_guild(**default_guild) @commands.group(aliases=[\"setspembed\", \"setspe\"]) @checks.guildowner_or_permissions() async def setspotifyembed(self, ctx: commands.Context):", "session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=ctx.author.display_name, avatar_url=ctx.author.avatar_url,", "return sendMsg = \"\" for match in spembedMatches: spembedSplit =", "self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False) await ctx.send(embed=e) @setspotifyembed.command(name=\"enable\") async def setspembedenable(self, ctx): \"\"\"Enable", "username=ctx.author.display_name, avatar_url=ctx.author.avatar_url, ) except discord.errors.Forbidden: return await ctx.send(sendMsg) else: return", "session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( 
sendMsg, username=message.author.display_name, avatar_url=message.author.avatar_url,", "sendMsg += spembedSplit[0] + \".com/embed/\" + spembedSplit[1] + \"\\n\" #", "asMyself == True: # Find a webhook that the bot", "Can set asMyself to true/false, for sending as webhook\"\"\" spembedSplit", "await webhook.send( sendMsg, username=ctx.author.display_name, avatar_url=ctx.author.avatar_url, ) except discord.errors.Forbidden: return await", "\".com/embed/\" + spembedSplit[1] + \"\\n\" # Find a webhook that", "Settings\", description=\"\") e.add_field(name=\"spotifyembedEnabled\", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False) await ctx.send(embed=e) @setspotifyembed.command(name=\"enable\") async", "await ctx.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async with aiohttp.ClientSession() as session:", "trigger message spembedCommandIgnore = r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\" spembedCommands = re.findall(spembedCommandIgnore, message.clean_content) if", "send a reply to Spotify links with a link to", "links with a link to the embed preview. 
Convenient for", "await message.channel.webhooks() whurl = \"\" # Return if match for", "+ spembedSplit[1] if asMyself == False: return await ctx.send(sendMsg) elif", "is not True: return # Ignore if we find [p]spotifyembed", "ctx.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async with aiohttp.ClientSession() as session: webhook", "samples from Discord, without needing an account.\"\"\" def __init__(self, bot):", "ctx): \"\"\"Enable auto-responding to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await ctx.message.add_reaction(\"✅\")", "await ctx.message.add_reaction(\"✅\") @commands.command(aliases=[\"spembed\", \"spe\"]) async def spotifyembed(self, ctx, spotifyLink, asMyself:", "await webhook.send( sendMsg, username=message.author.display_name, avatar_url=message.author.avatar_url, ) except discord.errors.Forbidden: return await", "{ \"spotifyembedEnabled\": False, } self.config.register_guild(**default_guild) @commands.group(aliases=[\"setspembed\", \"setspe\"]) @checks.guildowner_or_permissions() async def", "await self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await ctx.message.add_reaction(\"✅\") @commands.command(aliases=[\"spembed\", \"spe\"]) async def spotifyembed(self, ctx,", "webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=ctx.author.display_name, avatar_url=ctx.author.avatar_url, )", "that the bot made try: whooklist = await ctx.channel.webhooks() whurl", "made try: whooklist = await ctx.channel.webhooks() whurl = \"\" #", "= \"\" for match in spembedMatches: spembedSplit = match.split('.com/') sendMsg", "value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False) await ctx.send(embed=e) @setspotifyembed.command(name=\"enable\") async def setspembedenable(self, ctx):", "== False: return await ctx.send(sendMsg) elif asMyself == True: #", "music samples from Discord, without needing an account.\"\"\" def 
__init__(self,", "wh in whooklist: if self.bot.user == wh.user: whurl = wh.url", "for sending as webhook\"\"\" spembedSplit = spotifyLink.split('.com/') sendMsg = spembedSplit[0]", "new webhook if one didn't exist if whurl == \"\":", "to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await ctx.message.add_reaction(\"✅\") @setspotifyembed.command(name=\"disable\") async def", "if whurl == \"\": newHook = await ctx.channel.create_webhook(name=\"Webhook\") whurl =", "from redbot.core import Config from redbot.core import Config, commands, checks", "= await ctx.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async with aiohttp.ClientSession() as", "as webhook\"\"\" spembedSplit = spotifyLink.split('.com/') sendMsg = spembedSplit[0] + \".com/embed/\"", "listen to music samples from Discord, without needing an account.\"\"\"", "inline=False) await ctx.send(embed=e) @setspotifyembed.command(name=\"enable\") async def setspembedenable(self, ctx): \"\"\"Enable auto-responding", "occurred.\") @commands.Cog.listener() async def on_message(self, message: discord.Message): if message.author.bot: return", "__init__(self, bot): self.bot = bot self.config = Config.get_conf(self, identifier=806715409318936616) default_guild", "= re.findall(spembedFinder, message.clean_content) if len(spembedMatches) <= 0: return sendMsg =", "Config, commands, checks import asyncio import aiohttp import discord from", "webhook if one didn't exist if whurl == \"\": newHook", "r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\" spembedCommands = re.findall(spembedCommandIgnore, message.clean_content) if len(spembedCommands) > 0: return", "spotifyembedEnabled is not True: return # Ignore if we find", "= Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=ctx.author.display_name, avatar_url=ctx.author.avatar_url, ) except", "message.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async with 
aiohttp.ClientSession() as session: webhook", "wh.user: whurl = wh.url # Make new webhook if one", "spembedMatches = re.findall(spembedFinder, message.clean_content) if len(spembedMatches) <= 0: return sendMsg", "Config.get_conf(self, identifier=806715409318936616) default_guild = { \"spotifyembedEnabled\": False, } self.config.register_guild(**default_guild) @commands.group(aliases=[\"setspembed\",", "ctx: commands.Context): \"\"\"Set Spotify Embed settings\"\"\" if not ctx.invoked_subcommand: #", "the bot made try: whooklist = await message.channel.webhooks() whurl =", "Discord, without needing an account.\"\"\" def __init__(self, bot): self.bot =", "Spotifyembed(commands.Cog): \"\"\"Automatically send a reply to Spotify links with a", "+ \".com/embed/\" + spembedSplit[1] + \"\\n\" # Find a webhook", "AsyncWebhookAdapter import re class Spotifyembed(commands.Cog): \"\"\"Automatically send a reply to", "0: return sendMsg = \"\" for match in spembedMatches: spembedSplit", "= Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=message.author.display_name, avatar_url=message.author.avatar_url, ) except", "ctx.send(sendMsg) else: return await ctx.send(\"An error occurred.\") @commands.Cog.listener() async def", "asMyself: bool=False): \"\"\"Return a Spotify embed link Can set asMyself", "redbot.core import Config, commands, checks import asyncio import aiohttp import", "spembedSplit = match.split('.com/') sendMsg += spembedSplit[0] + \".com/embed/\" + spembedSplit[1]", "# Ignore if we find [p]spotifyembed in the trigger message", "= \"\" # Return if match for wh in whooklist:", "to Spotify links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await ctx.message.add_reaction(\"✅\") @commands.command(aliases=[\"spembed\", \"spe\"]) async", "ctx.message.add_reaction(\"✅\") @commands.command(aliases=[\"spembed\", \"spe\"]) async def spotifyembed(self, ctx, spotifyLink, asMyself: bool=False):", 
"discord.errors.Forbidden: return await ctx.send(sendMsg) else: return await ctx.send(\"An error occurred.\")", "if match for wh in whooklist: if self.bot.user == wh.user:", "if len(spembedCommands) > 0: return # Ignore if we find", "preview. Convenient for mobile users who can finally listen to", "the embed preview. Convenient for mobile users who can finally", "to music samples from Discord, without needing an account.\"\"\" def", "<= 0: return sendMsg = \"\" for match in spembedMatches:", "\"\"\"Set Spotify Embed settings\"\"\" if not ctx.invoked_subcommand: # Guild settings", "one didn't exist if whurl == \"\": newHook = await", "as session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=message.author.display_name,", "error occurred.\") @commands.Cog.listener() async def on_message(self, message: discord.Message): if message.author.bot:", "= spotifyLink.split('.com/') sendMsg = spembedSplit[0] + \".com/embed/\" + spembedSplit[1] if", "def setspotifyembed(self, ctx: commands.Context): \"\"\"Set Spotify Embed settings\"\"\" if not", "from discord import Webhook, AsyncWebhookAdapter import re class Spotifyembed(commands.Cog): \"\"\"Automatically", "trigger message spembedFinder = r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\" spembedMatches = re.findall(spembedFinder, message.clean_content) if", "spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled() if spotifyembedEnabled is not True: return", "identifier=806715409318936616) default_guild = { \"spotifyembedEnabled\": False, } self.config.register_guild(**default_guild) @commands.group(aliases=[\"setspembed\", \"setspe\"])", "import Config, commands, checks import asyncio import aiohttp import discord", "message spembedFinder = r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\" spembedMatches = re.findall(spembedFinder, message.clean_content) if len(spembedMatches)", 
"def __init__(self, bot): self.bot = bot self.config = Config.get_conf(self, identifier=806715409318936616)", "spembedSplit[1] if asMyself == False: return await ctx.send(sendMsg) elif asMyself", "\"\\n\" # Find a webhook that the bot made try:", "if asMyself == False: return await ctx.send(sendMsg) elif asMyself ==", "whooklist = await message.channel.webhooks() whurl = \"\" # Return if", "await ctx.send(sendMsg) elif asMyself == True: # Find a webhook", "the trigger message spembedFinder = r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\" spembedMatches = re.findall(spembedFinder, message.clean_content)", "= wh.url # Make new webhook if one didn't exist", "if not ctx.invoked_subcommand: # Guild settings e = discord.Embed(color=(await ctx.embed_colour()),", "@commands.command(aliases=[\"spembed\", \"spe\"]) async def spotifyembed(self, ctx, spotifyLink, asMyself: bool=False): \"\"\"Return", "import Webhook, AsyncWebhookAdapter import re class Spotifyembed(commands.Cog): \"\"\"Automatically send a", "# Guild settings e = discord.Embed(color=(await ctx.embed_colour()), title=\"Guild Settings\", description=\"\")", "not True: return # Ignore if we find [p]spotifyembed in", "\"\"\"Return a Spotify embed link Can set asMyself to true/false,", "links\"\"\" await self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await ctx.message.add_reaction(\"✅\") @setspotifyembed.command(name=\"disable\") async def setspembeddisable(self, ctx):", "def spotifyembed(self, ctx, spotifyLink, asMyself: bool=False): \"\"\"Return a Spotify embed", "\"\": newHook = await ctx.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async with", "if message.author.bot: return if message.webhook_id: return if message.guild is None:", "== \"\": newHook = await ctx.channel.create_webhook(name=\"Webhook\") whurl = newHook.url async", "spembedSplit[1] + \"\\n\" # Find a webhook that the bot", "import aiohttp import discord from discord import Webhook, 
AsyncWebhookAdapter import", "on_message(self, message: discord.Message): if message.author.bot: return if message.webhook_id: return if", "== True: # Find a webhook that the bot made", "message.channel.webhooks() whurl = \"\" # Return if match for wh", "with aiohttp.ClientSession() as session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send(", "return if message.webhook_id: return if message.guild is None: return spotifyembedEnabled", "the trigger message spembedCommandIgnore = r\"^\\S{1,9}(spotifyembed|spembed|spe)(?=\\s|$)\" spembedCommands = re.findall(spembedCommandIgnore, message.clean_content)", "message.guild is None: return spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled() if spotifyembedEnabled", "if message.webhook_id: return if message.guild is None: return spotifyembedEnabled =", "= r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\" spembedMatches = re.findall(spembedFinder, message.clean_content) if len(spembedMatches) <= 0:", "bot made try: whooklist = await message.channel.webhooks() whurl = \"\"", "self.bot = bot self.config = Config.get_conf(self, identifier=806715409318936616) default_guild = {", "== wh.user: whurl = wh.url # Make new webhook if", "if whurl == \"\": newHook = await message.channel.create_webhook(name=\"Webhook\") whurl =", "sending as webhook\"\"\" spembedSplit = spotifyLink.split('.com/') sendMsg = spembedSplit[0] +", "async with aiohttp.ClientSession() as session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await", "adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=ctx.author.display_name, avatar_url=ctx.author.avatar_url, ) except discord.errors.Forbidden: return", "wh.url # Make new webhook if one didn't exist if", "Return if match for wh in whooklist: if self.bot.user ==", "if self.bot.user == wh.user: whurl = wh.url # Make new", "# Return if match for wh in whooklist: 
if self.bot.user", "whooklist: if self.bot.user == wh.user: whurl = wh.url # Make", "async def on_message(self, message: discord.Message): if message.author.bot: return if message.webhook_id:", "ctx, spotifyLink, asMyself: bool=False): \"\"\"Return a Spotify embed link Can", "return # Ignore if we find no spotify links in", "that the bot made try: whooklist = await message.channel.webhooks() whurl", "= spembedSplit[0] + \".com/embed/\" + spembedSplit[1] if asMyself == False:", "except discord.errors.Forbidden: return await ctx.send(sendMsg) else: return await ctx.send(\"An error", "\"\" for match in spembedMatches: spembedSplit = match.split('.com/') sendMsg +=", "spembedSplit = spotifyLink.split('.com/') sendMsg = spembedSplit[0] + \".com/embed/\" + spembedSplit[1]", "r\"https\\:\\/\\/open\\.spotify\\.com\\/\\w{4,12}\\/\\w{14,26}(?=\\?|$|\\s)\" spembedMatches = re.findall(spembedFinder, message.clean_content) if len(spembedMatches) <= 0: return", "asMyself == False: return await ctx.send(sendMsg) elif asMyself == True:" ]
[ "/ sizeof_int) unpacked = struct.unpack(\"{}I\".format(int_count), bytes) accum = 0 for", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "to use in the hashed seed. \"\"\" if seed is", "* 8 * i) * val return accum def _int_list_from_bigint(bigint):", "copies or substantial portions of the Software. # #THE SOFTWARE", "= _bigint_from_bytes(a[:max_bytes]) elif isinstance(a, int): a = a % 2**(8", "[] while bigint > 0: bigint, mod = divmod(bigint, 2", "good enough to get rid of simple correlations.) Args: seed", "num = color2num[color] if highlight: num += 10 attr.append(str(num)) if", "notice shall be included in all copies or substantial portions", "num += 10 attr.append(str(num)) if bold: attr.append('1') attrs = ';'.join(attr)", "return _bigint_from_bytes(hash[:max_bytes]) def create_seed(a=None, max_bytes=8): \"\"\"Create a strong random seed.", "0: bigint, mod = divmod(bigint, 2 ** 32) ints.append(mod) return", "sizeof_int = 4 padding = sizeof_int - len(bytes) % sizeof_int", "PRNG's active at once. (Most commonly, because the environment is", "import os import struct def colorize(string, color, bold=False, highlight =", "seed using the system time, which might be non-robust especially", "TODO: don't hardcode sizeof_int here def _bigint_from_bytes(bytes): sizeof_int = 4", "#Copyright (c) 2020 DATA Lab at Texas A&M University #Copyright", "accum += 2 ** (sizeof_int * 8 * i) *", "= int(len(bytes) / sizeof_int) unpacked = struct.unpack(\"{}I\".format(int_count), bytes) accum =", "10 attr.append(str(num)) if bold: attr.append('1') attrs = ';'.join(attr) return '\\x1b[%sm%s\\x1b[0m'", "of bytes to use in the hashed seed. \"\"\" if", "the presence of concurrency. Args: a (Optional[int, str]): None seeds", "= a % 2**(8 * max_bytes) else: raise error.Error('Invalid type", "return accum def _int_list_from_bigint(bigint): # Special case 0 if bigint", "specific randomness source. 
max_bytes: Maximum number of bytes to use", "if bigint < 0: raise error.Error('Seed must be non-negative, not", "args), 'red')) def np_random(seed=None): if seed is not None and", "environment is running in multiple processes.) There's literature indicating that", "i, val in enumerate(unpacked): accum += 2 ** (sizeof_int *", "max_bytes: Maximum number of bytes to use in the seed.", "{}'.format(seed)) seed = create_seed(seed) rng = np.random.RandomState() rng.seed(_int_list_from_bigint(hash_seed(seed))) return rng,", "\"\"\" # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py if a is None: a", "Software without restriction, including without limitation the rights to use,", "many PRNG's active at once. (Most commonly, because the environment", "<= seed): raise error.Error('Seed must be a non-negative integer or", "0 <= seed): raise error.Error('Seed must be a non-negative integer", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "copies of the Software, and to permit persons to whom", "% args), 'red')) def np_random(seed=None): if seed is not None", "hereby granted, free of charge, to any person obtaining a", "number of bytes to use in the seed. \"\"\" #", "to deal in the Software without restriction, including without limitation", "\"\"\"Create a strong random seed. Otherwise, Python 2 would seed", "Lab at Texas A&M University #Copyright (c) 2016 OpenAI (https://openai.com)", "hash = hashlib.sha512(str(seed).encode('utf8')).digest() return _bigint_from_bytes(hash[:max_bytes]) def create_seed(a=None, max_bytes=8): \"\"\"Create a", "bytes) accum = 0 for i, val in enumerate(unpacked): accum", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "seed): raise error.Error('Seed must be a non-negative integer or omitted,", "green, yellow, blue, magenta, cyan, white, crimson \"\"\" attr =", "using them. (This scheme is likely not crypto-strength, but it", "in the hashed seed. 
\"\"\" if seed is None: seed", "# Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py if a is None: a =", "don't hardcode sizeof_int here def _bigint_from_bytes(bytes): sizeof_int = 4 padding", "bigint > 0: bigint, mod = divmod(bigint, 2 ** 32)", "USE OR OTHER DEALINGS IN THE SOFTWARE. import hashlib import", "MIT License # #Copyright (c) 2020 DATA Lab at Texas", "indicating that having linear correlations between seeds of multiple PRNG's", "seed is None: seed = create_seed(max_bytes=max_bytes) hash = hashlib.sha512(str(seed).encode('utf8')).digest() return", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "sizeof_int - len(bytes) % sizeof_int bytes += b'\\0' * padding", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "sanity we hash the seeds before using them. (This scheme", "modify, merge, publish, distribute, sublicense, and/or sell copies of the", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "persons to whom the Software is furnished to do so,", "limitation the rights to use, copy, modify, merge, publish, distribute,", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "def _int_list_from_bigint(bigint): # Special case 0 if bigint < 0:", "#Copyright (c) 2016 OpenAI (https://openai.com) # #Permission is hereby granted,", "# TODO: don't hardcode sizeof_int here def _bigint_from_bytes(bytes): sizeof_int =", "print colorized text. Valid colors: gray, red, green, yellow, blue,", "seed = create_seed(max_bytes=max_bytes) hash = hashlib.sha512(str(seed).encode('utf8')).digest() return _bigint_from_bytes(hash[:max_bytes]) def create_seed(a=None,", "ints = [] while bigint > 0: bigint, mod =", "in the presence of concurrency. Args: a (Optional[int, str]): None", "non-negative integer or omitted, not {}'.format(seed)) seed = create_seed(seed) rng", "terminal color codes to print colorized text. 
Valid colors: gray,", "%s'%('ERROR', msg % args), 'red')) def np_random(seed=None): if seed is", "if a is None: a = _bigint_from_bytes(os.urandom(max_bytes)) elif isinstance(a, str):", "padding int_count = int(len(bytes) / sizeof_int) unpacked = struct.unpack(\"{}I\".format(int_count), bytes)", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "University #Copyright (c) 2016 OpenAI (https://openai.com) # #Permission is hereby", "to use in the seed. \"\"\" # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py", "Software is furnished to do so, subject to the following", "raise error.Error('Seed must be a non-negative integer or omitted, not", "subject to the following conditions: # #The above copyright notice", "= [] num = color2num[color] if highlight: num += 10", "2 ** (sizeof_int * 8 * i) * val return", "sell copies of the Software, and to permit persons to", "codes to print colorized text. Valid colors: gray, red, green,", "* val return accum def _int_list_from_bigint(bigint): # Special case 0", "np_random(seed=None): if seed is not None and not (isinstance(seed, int)", "= a.encode('utf8') a += hashlib.sha512(a).digest() a = _bigint_from_bytes(a[:max_bytes]) elif isinstance(a,", "included in all copies or substantial portions of the Software.", "License # #Copyright (c) 2020 DATA Lab at Texas A&M", "np.random.RandomState() rng.seed(_int_list_from_bigint(hash_seed(seed))) return rng, seed def hash_seed(seed=None, max_bytes=8): \"\"\"Any given", "substantial portions of the Software. # #THE SOFTWARE IS PROVIDED", "if seed is not None and not (isinstance(seed, int) and", "a strong random seed. 
Otherwise, Python 2 would seed using", "struct def colorize(string, color, bold=False, highlight = False): \"\"\"Return string", "bold=False, highlight = False): \"\"\"Return string surrounded by appropriate terminal", "is not None and not (isinstance(seed, int) and 0 <=", "between seeds of multiple PRNG's can correlate the outputs: http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/", "hash_seed(seed=None, max_bytes=8): \"\"\"Any given evaluation is likely to have many", "outputs: http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/ http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be http://dl.acm.org/citation.cfm?id=1276928 Thus, for sanity we hash the", "copy, modify, merge, publish, distribute, sublicense, and/or sell copies of", "following conditions: # #The above copyright notice and this permission", "we hash the seeds before using them. (This scheme is", "max_bytes: Maximum number of bytes to use in the hashed", "in multiple processes.) 
There's literature indicating that having linear correlations", "furnished to do so, subject to the following conditions: #", "http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be http://dl.acm.org/citation.cfm?id=1276928 Thus, for sanity we hash the seeds before", "_bigint_from_bytes(os.urandom(max_bytes)) elif isinstance(a, str): a = a.encode('utf8') a += hashlib.sha512(a).digest()", "hashlib.sha512(str(seed).encode('utf8')).digest() return _bigint_from_bytes(hash[:max_bytes]) def create_seed(a=None, max_bytes=8): \"\"\"Create a strong random", "sizeof_int) unpacked = struct.unpack(\"{}I\".format(int_count), bytes) accum = 0 for i,", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "cyan, white, crimson \"\"\" attr = [] num = color2num[color]", "DATA Lab at Texas A&M University #Copyright (c) 2016 OpenAI", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "Thus, for sanity we hash the seeds before using them.", "publish, distribute, sublicense, and/or sell copies of the Software, and", "of bytes to use in the seed. \"\"\" # Adapted", "(c) 2020 DATA Lab at Texas A&M University #Copyright (c)", "conditions: # #The above copyright notice and this permission notice", "not {}'.format(bigint)) elif bigint == 0: return [0] ints =", "hashed seed. 
\"\"\" if seed is None: seed = create_seed(max_bytes=max_bytes)", "\"Software\"), to deal in the Software without restriction, including without", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "Texas A&M University #Copyright (c) 2016 OpenAI (https://openai.com) # #Permission", "# #Copyright (c) 2020 DATA Lab at Texas A&M University", "2 would seed using the system time, which might be", "> 0: bigint, mod = divmod(bigint, 2 ** 32) ints.append(mod)", "{}'.format(bigint)) elif bigint == 0: return [0] ints = []", "len(bytes) % sizeof_int bytes += b'\\0' * padding int_count =", "be included in all copies or substantial portions of the", "seed def hash_seed(seed=None, max_bytes=8): \"\"\"Any given evaluation is likely to", "operating system specific randomness source. max_bytes: Maximum number of bytes", "(sizeof_int * 8 * i) * val return accum def", "https://svn.python.org/projects/python/tags/r32/Lib/random.py if a is None: a = _bigint_from_bytes(os.urandom(max_bytes)) elif isinstance(a,", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "can correlate the outputs: http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/ http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be http://dl.acm.org/citation.cfm?id=1276928 Thus, for sanity", "bytes to use in the seed. \"\"\" # Adapted from", "be non-negative, not {}'.format(bigint)) elif bigint == 0: return [0]", "There's literature indicating that having linear correlations between seeds of", "IN THE SOFTWARE. import hashlib import numpy as np import", "must be a non-negative integer or omitted, not {}'.format(seed)) seed", "the following conditions: # #The above copyright notice and this", "(Optional[int]): None seeds from an operating system specific randomness source.", "not {}'.format(seed)) seed = create_seed(seed) rng = np.random.RandomState() rng.seed(_int_list_from_bigint(hash_seed(seed))) return", "colorized text. 
Valid colors: gray, red, green, yellow, blue, magenta,", "A&M University #Copyright (c) 2016 OpenAI (https://openai.com) # #Permission is", "0: return [0] ints = [] while bigint > 0:", "the hashed seed. \"\"\" if seed is None: seed =", "files (the \"Software\"), to deal in the Software without restriction,", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "== 0: return [0] ints = [] while bigint >", "if bold: attr.append('1') attrs = ';'.join(attr) return '\\x1b[%sm%s\\x1b[0m' % (attrs,", "the rights to use, copy, modify, merge, publish, distribute, sublicense,", "scheme is likely not crypto-strength, but it should be good", "2**(8 * max_bytes) else: raise error.Error('Invalid type for seed: {}", "import struct def colorize(string, color, bold=False, highlight = False): \"\"\"Return", "bigint < 0: raise error.Error('Seed must be non-negative, not {}'.format(bigint))", "software and associated documentation files (the \"Software\"), to deal in", "bytes to use in the hashed seed. \"\"\" if seed", "notice and this permission notice shall be included in all", "+= hashlib.sha512(a).digest() a = _bigint_from_bytes(a[:max_bytes]) elif isinstance(a, int): a =", "but it should be good enough to get rid of", "is hereby granted, free of charge, to any person obtaining", "attr = [] num = color2num[color] if highlight: num +=", "elif bigint == 0: return [0] ints = [] while", "an operating system specific randomness source. max_bytes: Maximum number of", "correlate the outputs: http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/ http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be http://dl.acm.org/citation.cfm?id=1276928 Thus, for sanity we", "Otherwise, Python 2 would seed using the system time, which", "create_seed(seed) rng = np.random.RandomState() rng.seed(_int_list_from_bigint(hash_seed(seed))) return rng, seed def hash_seed(seed=None,", "seed. 
\"\"\" if seed is None: seed = create_seed(max_bytes=max_bytes) hash", "the Software without restriction, including without limitation the rights to", "to have many PRNG's active at once. (Most commonly, because", "rng, seed def hash_seed(seed=None, max_bytes=8): \"\"\"Any given evaluation is likely", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "(https://openai.com) # #Permission is hereby granted, free of charge, to", "None and not (isinstance(seed, int) and 0 <= seed): raise", "and/or sell copies of the Software, and to permit persons", "permit persons to whom the Software is furnished to do", "enumerate(unpacked): accum += 2 ** (sizeof_int * 8 * i)", "seed is not None and not (isinstance(seed, int) and 0", "any person obtaining a copy of this software and associated", "int_count = int(len(bytes) / sizeof_int) unpacked = struct.unpack(\"{}I\".format(int_count), bytes) accum", "to do so, subject to the following conditions: # #The", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "0 if bigint < 0: raise error.Error('Seed must be non-negative,", "create_seed(a=None, max_bytes=8): \"\"\"Create a strong random seed. Otherwise, Python 2", "i) * val return accum def _int_list_from_bigint(bigint): # Special case", "** (sizeof_int * 8 * i) * val return accum", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "create_seed(max_bytes=max_bytes) hash = hashlib.sha512(str(seed).encode('utf8')).digest() return _bigint_from_bytes(hash[:max_bytes]) def create_seed(a=None, max_bytes=8): \"\"\"Create", "Software. # #THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "it should be good enough to get rid of simple", "seeds from an operating system specific randomness source. 
max_bytes: Maximum", "% (attrs, string) def error(msg, *args): print(colorize('%s: %s'%('ERROR', msg %", "8 * i) * val return accum def _int_list_from_bigint(bigint): #", "white, crimson \"\"\" attr = [] num = color2num[color] if", "copy of this software and associated documentation files (the \"Software\"),", "Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py if a is None: a = _bigint_from_bytes(os.urandom(max_bytes))", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "by appropriate terminal color codes to print colorized text. Valid", "including without limitation the rights to use, copy, modify, merge,", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "is likely not crypto-strength, but it should be good enough", "while bigint > 0: bigint, mod = divmod(bigint, 2 **", "def _bigint_from_bytes(bytes): sizeof_int = 4 padding = sizeof_int - len(bytes)", "2020 DATA Lab at Texas A&M University #Copyright (c) 2016", "before using them. (This scheme is likely not crypto-strength, but", "or omitted, not {}'.format(seed)) seed = create_seed(seed) rng = np.random.RandomState()", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "return rng, seed def hash_seed(seed=None, max_bytes=8): \"\"\"Any given evaluation is", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "False): \"\"\"Return string surrounded by appropriate terminal color codes to", "concurrency. Args: a (Optional[int, str]): None seeds from an operating", "None: a = _bigint_from_bytes(os.urandom(max_bytes)) elif isinstance(a, str): a = a.encode('utf8')", "= ';'.join(attr) return '\\x1b[%sm%s\\x1b[0m' % (attrs, string) def error(msg, *args):", "0 for i, val in enumerate(unpacked): accum += 2 **", "to the following conditions: # #The above copyright notice and", "(This scheme is likely not crypto-strength, but it should be", "strong random seed. 
Otherwise, Python 2 would seed using the", "#The MIT License # #Copyright (c) 2020 DATA Lab at", "rid of simple correlations.) Args: seed (Optional[int]): None seeds from", "using the system time, which might be non-robust especially in", "if highlight: num += 10 attr.append(str(num)) if bold: attr.append('1') attrs", "having linear correlations between seeds of multiple PRNG's can correlate", "max_bytes=8): \"\"\"Create a strong random seed. Otherwise, Python 2 would", "without limitation the rights to use, copy, modify, merge, publish,", "def hash_seed(seed=None, max_bytes=8): \"\"\"Any given evaluation is likely to have", "crimson \"\"\" attr = [] num = color2num[color] if highlight:", "[0] ints = [] while bigint > 0: bigint, mod", "hashlib import numpy as np import os import struct def", "and not (isinstance(seed, int) and 0 <= seed): raise error.Error('Seed", "have many PRNG's active at once. (Most commonly, because the", "#Permission is hereby granted, free of charge, to any person", "of multiple PRNG's can correlate the outputs: http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/ http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be http://dl.acm.org/citation.cfm?id=1276928", "restriction, including without limitation the rights to use, copy, modify,", "a)) return a # TODO: don't hardcode sizeof_int here def", "should be good enough to get rid of simple correlations.)", "is None: seed = create_seed(max_bytes=max_bytes) hash = hashlib.sha512(str(seed).encode('utf8')).digest() return _bigint_from_bytes(hash[:max_bytes])", "to permit persons to whom the Software is furnished to", "\"\"\" attr = [] num = color2num[color] if highlight: num", "do so, subject to the following conditions: # #The above", "surrounded by appropriate terminal color codes to print colorized text.", "max_bytes) else: raise error.Error('Invalid type for seed: {} ({})'.format(type(a), a))", "numpy as np import os import struct def 
colorize(string, color,", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "color codes to print colorized text. Valid colors: gray, red,", "OTHER DEALINGS IN THE SOFTWARE. import hashlib import numpy as", "bytes += b'\\0' * padding int_count = int(len(bytes) / sizeof_int)", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "sizeof_int here def _bigint_from_bytes(bytes): sizeof_int = 4 padding = sizeof_int", "deal in the Software without restriction, including without limitation the", "\"\"\"Any given evaluation is likely to have many PRNG's active", "+= 2 ** (sizeof_int * 8 * i) * val", "sizeof_int bytes += b'\\0' * padding int_count = int(len(bytes) /", "< 0: raise error.Error('Seed must be non-negative, not {}'.format(bigint)) elif", "\"\"\"Return string surrounded by appropriate terminal color codes to print", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/ http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be http://dl.acm.org/citation.cfm?id=1276928 Thus, for sanity we hash the seeds", "be non-robust especially in the presence of concurrency. Args: a", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "return a # TODO: don't hardcode sizeof_int here def _bigint_from_bytes(bytes):", "a += hashlib.sha512(a).digest() a = _bigint_from_bytes(a[:max_bytes]) elif isinstance(a, int): a", "distribute, sublicense, and/or sell copies of the Software, and to", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "string surrounded by appropriate terminal color codes to print colorized", "enough to get rid of simple correlations.) Args: seed (Optional[int]):", "commonly, because the environment is running in multiple processes.) There's", "appropriate terminal color codes to print colorized text. Valid colors:", "at once. 
(Most commonly, because the environment is running in", "* padding int_count = int(len(bytes) / sizeof_int) unpacked = struct.unpack(\"{}I\".format(int_count),", "seeds before using them. (This scheme is likely not crypto-strength,", "the Software, and to permit persons to whom the Software", "for seed: {} ({})'.format(type(a), a)) return a # TODO: don't", "and associated documentation files (the \"Software\"), to deal in the", "colorize(string, color, bold=False, highlight = False): \"\"\"Return string surrounded by", "non-negative, not {}'.format(bigint)) elif bigint == 0: return [0] ints", "active at once. (Most commonly, because the environment is running", "the outputs: http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/ http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be http://dl.acm.org/citation.cfm?id=1276928 Thus, for sanity we hash", "a is None: a = _bigint_from_bytes(os.urandom(max_bytes)) elif isinstance(a, str): a", "portions of the Software. # #THE SOFTWARE IS PROVIDED \"AS", "correlations between seeds of multiple PRNG's can correlate the outputs:", "time, which might be non-robust especially in the presence of", "to whom the Software is furnished to do so, subject", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "def colorize(string, color, bold=False, highlight = False): \"\"\"Return string surrounded", "linear correlations between seeds of multiple PRNG's can correlate the", "= [] while bigint > 0: bigint, mod = divmod(bigint,", "Args: seed (Optional[int]): None seeds from an operating system specific", "is likely to have many PRNG's active at once. 
(Most", "_bigint_from_bytes(bytes): sizeof_int = 4 padding = sizeof_int - len(bytes) %", "that having linear correlations between seeds of multiple PRNG's can", "WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT", "({})'.format(type(a), a)) return a # TODO: don't hardcode sizeof_int here", "a = _bigint_from_bytes(os.urandom(max_bytes)) elif isinstance(a, str): a = a.encode('utf8') a", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "which might be non-robust especially in the presence of concurrency.", "use in the seed. \"\"\" # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py if", "raise error.Error('Invalid type for seed: {} ({})'.format(type(a), a)) return a", "all copies or substantial portions of the Software. # #THE", "of the Software, and to permit persons to whom the", "this software and associated documentation files (the \"Software\"), to deal", "bigint == 0: return [0] ints = [] while bigint", "= 4 padding = sizeof_int - len(bytes) % sizeof_int bytes", "literature indicating that having linear correlations between seeds of multiple", "accum def _int_list_from_bigint(bigint): # Special case 0 if bigint <", "(the \"Software\"), to deal in the Software without restriction, including", "merge, publish, distribute, sublicense, and/or sell copies of the Software,", "seed. Otherwise, Python 2 would seed using the system time,", "DEALINGS IN THE SOFTWARE. import hashlib import numpy as np", "padding = sizeof_int - len(bytes) % sizeof_int bytes += b'\\0'", "error.Error('Seed must be non-negative, not {}'.format(bigint)) elif bigint == 0:", "(Optional[int, str]): None seeds from an operating system specific randomness", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "charge, to any person obtaining a copy of this software", "to print colorized text. 
Valid colors: gray, red, green, yellow,", "rng.seed(_int_list_from_bigint(hash_seed(seed))) return rng, seed def hash_seed(seed=None, max_bytes=8): \"\"\"Any given evaluation", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "* i) * val return accum def _int_list_from_bigint(bigint): # Special", "None: seed = create_seed(max_bytes=max_bytes) hash = hashlib.sha512(str(seed).encode('utf8')).digest() return _bigint_from_bytes(hash[:max_bytes]) def", "multiple PRNG's can correlate the outputs: http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/ http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be http://dl.acm.org/citation.cfm?id=1276928 Thus,", "a # TODO: don't hardcode sizeof_int here def _bigint_from_bytes(bytes): sizeof_int", "in enumerate(unpacked): accum += 2 ** (sizeof_int * 8 *", "unpacked = struct.unpack(\"{}I\".format(int_count), bytes) accum = 0 for i, val", "% 2**(8 * max_bytes) else: raise error.Error('Invalid type for seed:", "system specific randomness source. max_bytes: Maximum number of bytes to", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "_bigint_from_bytes(hash[:max_bytes]) def create_seed(a=None, max_bytes=8): \"\"\"Create a strong random seed. Otherwise,", "case 0 if bigint < 0: raise error.Error('Seed must be", "in the Software without restriction, including without limitation the rights", "permission notice shall be included in all copies or substantial", "of concurrency. 
Args: a (Optional[int, str]): None seeds from an", "highlight: num += 10 attr.append(str(num)) if bold: attr.append('1') attrs =", "string) def error(msg, *args): print(colorize('%s: %s'%('ERROR', msg % args), 'red'))", "(isinstance(seed, int) and 0 <= seed): raise error.Error('Seed must be", "bold: attr.append('1') attrs = ';'.join(attr) return '\\x1b[%sm%s\\x1b[0m' % (attrs, string)", "= create_seed(seed) rng = np.random.RandomState() rng.seed(_int_list_from_bigint(hash_seed(seed))) return rng, seed def", "Special case 0 if bigint < 0: raise error.Error('Seed must", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "return [0] ints = [] while bigint > 0: bigint,", "likely to have many PRNG's active at once. (Most commonly,", "not crypto-strength, but it should be good enough to get", "is None: a = _bigint_from_bytes(os.urandom(max_bytes)) elif isinstance(a, str): a =", "hash the seeds before using them. (This scheme is likely", "hardcode sizeof_int here def _bigint_from_bytes(bytes): sizeof_int = 4 padding =", "* max_bytes) else: raise error.Error('Invalid type for seed: {} ({})'.format(type(a),", "str): a = a.encode('utf8') a += hashlib.sha512(a).digest() a = _bigint_from_bytes(a[:max_bytes])", "% sizeof_int bytes += b'\\0' * padding int_count = int(len(bytes)", "*args): print(colorize('%s: %s'%('ERROR', msg % args), 'red')) def np_random(seed=None): if", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "once. (Most commonly, because the environment is running in multiple", "evaluation is likely to have many PRNG's active at once.", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "the environment is running in multiple processes.) 
There's literature indicating", "not None and not (isinstance(seed, int) and 0 <= seed):", "copyright notice and this permission notice shall be included in", "= _bigint_from_bytes(os.urandom(max_bytes)) elif isinstance(a, str): a = a.encode('utf8') a +=", "2016 OpenAI (https://openai.com) # #Permission is hereby granted, free of", "[] num = color2num[color] if highlight: num += 10 attr.append(str(num))", "because the environment is running in multiple processes.) There's literature", "be good enough to get rid of simple correlations.) Args:", "np import os import struct def colorize(string, color, bold=False, highlight", "b'\\0' * padding int_count = int(len(bytes) / sizeof_int) unpacked =", "int): a = a % 2**(8 * max_bytes) else: raise", "+= 10 attr.append(str(num)) if bold: attr.append('1') attrs = ';'.join(attr) return", "and to permit persons to whom the Software is furnished", "bigint, mod = divmod(bigint, 2 ** 32) ints.append(mod) return ints", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "= False): \"\"\"Return string surrounded by appropriate terminal color codes", "elif isinstance(a, int): a = a % 2**(8 * max_bytes)", "def create_seed(a=None, max_bytes=8): \"\"\"Create a strong random seed. 
Otherwise, Python", "os import struct def colorize(string, color, bold=False, highlight = False):", "a.encode('utf8') a += hashlib.sha512(a).digest() a = _bigint_from_bytes(a[:max_bytes]) elif isinstance(a, int):", "accum = 0 for i, val in enumerate(unpacked): accum +=", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "int) and 0 <= seed): raise error.Error('Seed must be a", "error(msg, *args): print(colorize('%s: %s'%('ERROR', msg % args), 'red')) def np_random(seed=None):", "Python 2 would seed using the system time, which might", "would seed using the system time, which might be non-robust", "- len(bytes) % sizeof_int bytes += b'\\0' * padding int_count", "here def _bigint_from_bytes(bytes): sizeof_int = 4 padding = sizeof_int -", "get rid of simple correlations.) Args: seed (Optional[int]): None seeds", "+= b'\\0' * padding int_count = int(len(bytes) / sizeof_int) unpacked", "whom the Software is furnished to do so, subject to", "seed: {} ({})'.format(type(a), a)) return a # TODO: don't hardcode", "raise error.Error('Seed must be non-negative, not {}'.format(bigint)) elif bigint ==", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import", "a (Optional[int, str]): None seeds from an operating system specific", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "_int_list_from_bigint(bigint): # Special case 0 if bigint < 0: raise", "is running in multiple processes.) There's literature indicating that having", "use in the hashed seed. \"\"\" if seed is None:", "obtaining a copy of this software and associated documentation files", "might be non-robust especially in the presence of concurrency. 
Args:", "rng = np.random.RandomState() rng.seed(_int_list_from_bigint(hash_seed(seed))) return rng, seed def hash_seed(seed=None, max_bytes=8):", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "(c) 2016 OpenAI (https://openai.com) # #Permission is hereby granted, free", "';'.join(attr) return '\\x1b[%sm%s\\x1b[0m' % (attrs, string) def error(msg, *args): print(colorize('%s:", "of this software and associated documentation files (the \"Software\"), to", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "a copy of this software and associated documentation files (the", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "of the Software. # #THE SOFTWARE IS PROVIDED \"AS IS\",", "sublicense, and/or sell copies of the Software, and to permit", "must be non-negative, not {}'.format(bigint)) elif bigint == 0: return", "presence of concurrency. Args: a (Optional[int, str]): None seeds from", "so, subject to the following conditions: # #The above copyright", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "red, green, yellow, blue, magenta, cyan, white, crimson \"\"\" attr", "Maximum number of bytes to use in the hashed seed.", "OpenAI (https://openai.com) # #Permission is hereby granted, free of charge,", "'red')) def np_random(seed=None): if seed is not None and not", "color2num[color] if highlight: num += 10 attr.append(str(num)) if bold: attr.append('1')", "Valid colors: gray, red, green, yellow, blue, magenta, cyan, white,", "= color2num[color] if highlight: num += 10 attr.append(str(num)) if bold:", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. import hashlib", "from an operating system specific randomness source. 
max_bytes: Maximum number", "highlight = False): \"\"\"Return string surrounded by appropriate terminal color", "from https://svn.python.org/projects/python/tags/r32/Lib/random.py if a is None: a = _bigint_from_bytes(os.urandom(max_bytes)) elif", "the seed. \"\"\" # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py if a is", "color, bold=False, highlight = False): \"\"\"Return string surrounded by appropriate", "for sanity we hash the seeds before using them. (This", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "correlations.) Args: seed (Optional[int]): None seeds from an operating system", "this permission notice shall be included in all copies or", "attr.append('1') attrs = ';'.join(attr) return '\\x1b[%sm%s\\x1b[0m' % (attrs, string) def", "a non-negative integer or omitted, not {}'.format(seed)) seed = create_seed(seed)", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "in the seed. \"\"\" # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py if a", "= struct.unpack(\"{}I\".format(int_count), bytes) accum = 0 for i, val in", "processes.) There's literature indicating that having linear correlations between seeds", "{} ({})'.format(type(a), a)) return a # TODO: don't hardcode sizeof_int", "error.Error('Seed must be a non-negative integer or omitted, not {}'.format(seed))", "above copyright notice and this permission notice shall be included", "integer or omitted, not {}'.format(seed)) seed = create_seed(seed) rng =", "print(colorize('%s: %s'%('ERROR', msg % args), 'red')) def np_random(seed=None): if seed", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "to get rid of simple correlations.) Args: seed (Optional[int]): None", "a = a % 2**(8 * max_bytes) else: raise error.Error('Invalid", "# #The above copyright notice and this permission notice shall", "seed. 
\"\"\" # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py if a is None:", "colors: gray, red, green, yellow, blue, magenta, cyan, white, crimson", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,", "error.Error('Invalid type for seed: {} ({})'.format(type(a), a)) return a #", "<reponame>AdrianP-/rlcard #The MIT License # #Copyright (c) 2020 DATA Lab", "str]): None seeds from an operating system specific randomness source.", "#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "yellow, blue, magenta, cyan, white, crimson \"\"\" attr = []", "elif isinstance(a, str): a = a.encode('utf8') a += hashlib.sha512(a).digest() a", "isinstance(a, int): a = a % 2**(8 * max_bytes) else:", "4 padding = sizeof_int - len(bytes) % sizeof_int bytes +=", "in all copies or substantial portions of the Software. #", "simple correlations.) Args: seed (Optional[int]): None seeds from an operating", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "is furnished to do so, subject to the following conditions:", "of simple correlations.) 
Args: seed (Optional[int]): None seeds from an", "seed (Optional[int]): None seeds from an operating system specific randomness", "max_bytes=8): \"\"\"Any given evaluation is likely to have many PRNG's", "to any person obtaining a copy of this software and", "# #Permission is hereby granted, free of charge, to any", "if seed is None: seed = create_seed(max_bytes=max_bytes) hash = hashlib.sha512(str(seed).encode('utf8')).digest()", "shall be included in all copies or substantial portions of", "person obtaining a copy of this software and associated documentation", "system time, which might be non-robust especially in the presence", "struct.unpack(\"{}I\".format(int_count), bytes) accum = 0 for i, val in enumerate(unpacked):", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "blue, magenta, cyan, white, crimson \"\"\" attr = [] num", "source. max_bytes: Maximum number of bytes to use in the", "def np_random(seed=None): if seed is not None and not (isinstance(seed,", "= 0 for i, val in enumerate(unpacked): accum += 2", "THE SOFTWARE. import hashlib import numpy as np import os", "and this permission notice shall be included in all copies", "be a non-negative integer or omitted, not {}'.format(seed)) seed =", "0: raise error.Error('Seed must be non-negative, not {}'.format(bigint)) elif bigint", "PRNG's can correlate the outputs: http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/ http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be http://dl.acm.org/citation.cfm?id=1276928 Thus, for", "isinstance(a, str): a = a.encode('utf8') a += hashlib.sha512(a).digest() a =", "attr.append(str(num)) if bold: attr.append('1') attrs = ';'.join(attr) return '\\x1b[%sm%s\\x1b[0m' %", "seeds of multiple PRNG's can correlate the outputs: http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/ http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be", "SOFTWARE. 
import hashlib import numpy as np import os import", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "else: raise error.Error('Invalid type for seed: {} ({})'.format(type(a), a)) return", "a = _bigint_from_bytes(a[:max_bytes]) elif isinstance(a, int): a = a %", "not (isinstance(seed, int) and 0 <= seed): raise error.Error('Seed must", "= create_seed(max_bytes=max_bytes) hash = hashlib.sha512(str(seed).encode('utf8')).digest() return _bigint_from_bytes(hash[:max_bytes]) def create_seed(a=None, max_bytes=8):", "= sizeof_int - len(bytes) % sizeof_int bytes += b'\\0' *", "running in multiple processes.) There's literature indicating that having linear", "\"\"\" if seed is None: seed = create_seed(max_bytes=max_bytes) hash =", "#The above copyright notice and this permission notice shall be", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "int(len(bytes) / sizeof_int) unpacked = struct.unpack(\"{}I\".format(int_count), bytes) accum = 0", "the seeds before using them. (This scheme is likely not", "free of charge, to any person obtaining a copy of", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "import hashlib import numpy as np import os import struct", "msg % args), 'red')) def np_random(seed=None): if seed is not", "number of bytes to use in the hashed seed. \"\"\"", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "the Software is furnished to do so, subject to the", "Software, and to permit persons to whom the Software is", "OR OTHER DEALINGS IN THE SOFTWARE. 
import hashlib import numpy", "gray, red, green, yellow, blue, magenta, cyan, white, crimson \"\"\"", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "at Texas A&M University #Copyright (c) 2016 OpenAI (https://openai.com) #", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "(Most commonly, because the environment is running in multiple processes.)", "documentation files (the \"Software\"), to deal in the Software without", "for i, val in enumerate(unpacked): accum += 2 ** (sizeof_int", "_bigint_from_bytes(a[:max_bytes]) elif isinstance(a, int): a = a % 2**(8 *", "non-robust especially in the presence of concurrency. Args: a (Optional[int,", "or substantial portions of the Software. # #THE SOFTWARE IS", "import numpy as np import os import struct def colorize(string,", "magenta, cyan, white, crimson \"\"\" attr = [] num =", "without restriction, including without limitation the rights to use, copy,", "seed = create_seed(seed) rng = np.random.RandomState() rng.seed(_int_list_from_bigint(hash_seed(seed))) return rng, seed", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "as np import os import struct def colorize(string, color, bold=False,", "= np.random.RandomState() rng.seed(_int_list_from_bigint(hash_seed(seed))) return rng, seed def hash_seed(seed=None, max_bytes=8): \"\"\"Any", "type for seed: {} ({})'.format(type(a), a)) return a # TODO:", "val in enumerate(unpacked): accum += 2 ** (sizeof_int * 8", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "return '\\x1b[%sm%s\\x1b[0m' % (attrs, string) def error(msg, *args): print(colorize('%s: %s'%('ERROR',", "'\\x1b[%sm%s\\x1b[0m' % (attrs, string) def error(msg, *args): print(colorize('%s: %s'%('ERROR', msg", "especially in the presence of concurrency. 
Args: a (Optional[int, str]):", "crypto-strength, but it should be good enough to get rid", "likely not crypto-strength, but it should be good enough to", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "def error(msg, *args): print(colorize('%s: %s'%('ERROR', msg % args), 'red')) def", "= hashlib.sha512(str(seed).encode('utf8')).digest() return _bigint_from_bytes(hash[:max_bytes]) def create_seed(a=None, max_bytes=8): \"\"\"Create a strong", "None seeds from an operating system specific randomness source. max_bytes:", "granted, free of charge, to any person obtaining a copy", "http://dl.acm.org/citation.cfm?id=1276928 Thus, for sanity we hash the seeds before using", "multiple processes.) There's literature indicating that having linear correlations between", "them. (This scheme is likely not crypto-strength, but it should", "of charge, to any person obtaining a copy of this", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "val return accum def _int_list_from_bigint(bigint): # Special case 0 if", "the system time, which might be non-robust especially in the", "and 0 <= seed): raise error.Error('Seed must be a non-negative", "the Software. # #THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "Maximum number of bytes to use in the seed. \"\"\"", "# Special case 0 if bigint < 0: raise error.Error('Seed", "hashlib.sha512(a).digest() a = _bigint_from_bytes(a[:max_bytes]) elif isinstance(a, int): a = a", "Args: a (Optional[int, str]): None seeds from an operating system", "random seed. Otherwise, Python 2 would seed using the system", "# #THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "a = a.encode('utf8') a += hashlib.sha512(a).digest() a = _bigint_from_bytes(a[:max_bytes]) elif", "attrs = ';'.join(attr) return '\\x1b[%sm%s\\x1b[0m' % (attrs, string) def error(msg,", "randomness source. 
max_bytes: Maximum number of bytes to use in", "omitted, not {}'.format(seed)) seed = create_seed(seed) rng = np.random.RandomState() rng.seed(_int_list_from_bigint(hash_seed(seed)))", "associated documentation files (the \"Software\"), to deal in the Software", "given evaluation is likely to have many PRNG's active at", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "text. Valid colors: gray, red, green, yellow, blue, magenta, cyan,", "(attrs, string) def error(msg, *args): print(colorize('%s: %s'%('ERROR', msg % args),", "a % 2**(8 * max_bytes) else: raise error.Error('Invalid type for", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING" ]
[ "inputs[\"raw_labels\"]: labels[self.class_map[c]] = 1.0 transformed = dict(inputs) transformed[\"labels\"] = labels", "def __call__(self, dataset, **inputs): transform = random.choice(self.transforms) return transform(**inputs) class", "class_map, drop_raw=True): self.class_map = class_map def __call__(self, dataset, **inputs): labels", "= int(n_fft) self.hop_size = int(hop_size) self.n_features = self.n_fft // 2", "transformed[\"audio\"] = inputs[\"audio\"][start:start+max_length] return transformed class OneOf: def __init__(self, transforms):", "MixUp(Augmentation): def __init__(self, p): self.p = p def __call__(self, dataset,", "p=0.5): self.area = area self.p = p def __call__(self, dataset,", "dict() for name, input in inputs.items(): if not name in", "np.random.randint(0, inputs[\"audio\"].size - max_length) transformed[\"audio\"] = inputs[\"audio\"][start:start+max_length] return transformed class", "sr=transformed[\"sr\"]) return transformed class CutOut(Augmentation): def __init__(self, area=0.25, p=0.5): self.area", "np.transpose(stft) return transformed class AudioFeatures: eps = 1e-4 def __init__(self,", "self.n_mel self.padding_value = 0.0 if verbose: print( \"\\nUsing mel features", ") .pitch(shift=random.randrange(-300, 300)) .overdrive(gain=random.randrange(2, 10)) .speed(random.uniform(0.9, 1.1)) ) transformed[\"audio\"] =", "return transformed class LoadAudio: def __init__(self): pass def __call__(self, dataset,", "( read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout ) SAMPLE_RATE =", "transformed class AudioFeatures: eps = 1e-4 def __init__(self, descriptor, verbose=True):", "= descriptor.split(\"_\") self.feature_type = name if name == \"stft\": n_fft,", "-1) return transformed class SampleSegment(Augmentation): def __init__(self, ratio=(0.3, 0.9), p=1.0):", "for t in self.transforms: if isinstance(t, Augmentation): t.p = 0.0", "= audio transformed[\"sr\"] = sr return transformed class STFT: eps", 
"np.zeros(len(self.class_map), dtype=np.float32) for c in inputs[\"raw_labels\"]: labels[self.class_map[c]] = 1.0 transformed", "import torch from ops.audio import ( read_audio, compute_stft, trim_audio, mix_audio_and_labels,", "hop_size: {}, n_mel: {}\".format( n_fft, hop_size, n_mel ) ) elif", "**inputs): transformed = dict(inputs) if np.random.uniform() < self.p: effects_chain =", "self.p: original_size = inputs[\"audio\"].size target_size = int(np.random.uniform(self.min, self.max) * original_size)", "self.n_fft = int(n_fft) self.hop_size = int(hop_size) self.n_mel = int(n_mel) self.n_features", "np.random.uniform() < self.p: transformed[\"audio\"] = cutout( transformed[\"audio\"], self.area) return transformed", "import librosa import numpy as np import torch from ops.audio", "__init__(self, n_fft, hop_size): self.n_fft = n_fft self.hop_size = hop_size def", "name == \"raw\": self.n_features = 1 self.padding_value = 0.0 if", "np.random.uniform() < self.p: transformed[\"audio\"] = shuffle_audio( transformed[\"audio\"], self.chunk_length, sr=transformed[\"sr\"]) return", "start = np.random.randint(original_size - target_size - 1) transformed[\"audio\"] = inputs[\"audio\"][start:start+target_size]", "= mapping def __call__(self, dataset, **inputs): transformed = dict(inputs) for", "1.0 transformed = dict(inputs) transformed[\"labels\"] = labels transformed.pop(\"raw_labels\") return transformed", "* original_size) start = np.random.randint(original_size - target_size - 1) transformed[\"audio\"]", "transform(**inputs) class DropFields: def __init__(self, fields): self.to_drop = fields def", "in self.transforms: if isinstance(t, Augmentation): t.p = 0.0 def __call__(self,", "self.mapping = mapping def __call__(self, dataset, **inputs): transformed = dict(inputs)", "\"\\nUsing STFT features with params:\\n\", \"n_fft: {}, hop_size: {}\".format( n_fft,", "= transformed.pop(old) return transformed class Compose: def __init__(self, transforms): self.transforms", 
"self.transforms: inputs = t(dataset=dataset, **inputs) return inputs class Identity: def", "self.chunk_length = chunk_length self.p = p def __call__(self, dataset, **inputs):", "= 1.0 transformed = dict(inputs) transformed[\"labels\"] = labels transformed.pop(\"raw_labels\") return", "in self.to_drop: transformed[name] = input return transformed class RenameFields: def", "**inputs): transformed = dict(inputs) if np.random.uniform() < self.p: first_audio, first_labels", "return transformed class CutOut(Augmentation): def __init__(self, area=0.25, p=0.5): self.area =", "# stft = compute_stft( # inputs[\"audio\"], # window_size=self.n_fft, hop_size=self.hop_size, #", "n_mel = args self.n_fft = int(n_fft) self.hop_size = int(hop_size) self.n_mel", "= self.n_fft // 2 + 1 self.padding_value = 0.0 if", "pysndfx import librosa import numpy as np import torch from", "print( \"\\nUsing STFT features with params:\\n\", \"n_fft: {}, hop_size: {}\".format(", "self.min, self.max = ratio self.p = p def __call__(self, dataset,", "10)) .speed(random.uniform(0.9, 1.1)) ) transformed[\"audio\"] = effects_chain(inputs[\"audio\"]) return transformed class", "target_size - 1) transformed[\"audio\"] = inputs[\"audio\"][start:start+target_size] return transformed class ShuffleAudio(Augmentation):", "def __init__(self, class_map, drop_raw=True): self.class_map = class_map def __call__(self, dataset,", "transformed[\"labels\"] = labels transformed.pop(\"raw_labels\") return transformed class MixUp(Augmentation): def __init__(self,", "transforms): self.transforms = transforms def switch_off_augmentations(self): for t in self.transforms:", "= int(hop_size) self.n_features = self.n_fft // 2 + 1 self.padding_value", "transformed[name] = input return transformed class RenameFields: def __init__(self, mapping):", "def __init__(self, ratio=(0.3, 0.9), p=1.0): self.min, self.max = ratio self.p", "in self.mapping.items(): transformed[new] = transformed.pop(old) return transformed class Compose: 
def", "transformed[\"audio\"] = inputs[\"audio\"][start:start+target_size] return transformed class ShuffleAudio(Augmentation): def __init__(self, chunk_length=0.5,", "class ShuffleAudio(Augmentation): def __init__(self, chunk_length=0.5, p=0.5): self.chunk_length = chunk_length self.p", "if np.random.uniform() < self.p: transformed[\"audio\"] = cutout( transformed[\"audio\"], self.area) return", "n_fft, hop_size ) ) elif name == \"mel\": n_fft, hop_size,", "__call__(self, dataset, **inputs): stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps)", "SampleLongAudio: def __init__(self, max_length): self.max_length = max_length def __call__(self, dataset,", "self.n_fft = n_fft self.hop_size = hop_size def __call__(self, dataset, **inputs):", "0.0 if verbose: print( \"\\nUsing STFT features with params:\\n\", \"n_fft:", "if self.feature_type == \"stft\": # stft = compute_stft( # inputs[\"audio\"],", "SAMPLE_RATE = 44100 class Augmentation: \"\"\"A base class for data", "transformed class ShuffleAudio(Augmentation): def __init__(self, chunk_length=0.5, p=0.5): self.chunk_length = chunk_length", "= effects_chain(inputs[\"audio\"]) return transformed class LoadAudio: def __init__(self): pass def", "dataset, **inputs): labels = np.zeros(len(self.class_map), dtype=np.float32) for c in inputs[\"raw_labels\"]:", "for c in inputs[\"raw_labels\"]: labels[self.class_map[c]] = 1.0 transformed = dict(inputs)", "\"\"\"A base class for data augmentation transforms\"\"\" pass class MapLabels:", "class STFT: eps = 1e-4 def __init__(self, n_fft, hop_size): self.n_fft", "**inputs): transformed = dict(inputs) if np.random.uniform() < self.p: original_size =", "math from functools import partial import json import pysndfx import", "self.to_drop: transformed[name] = input return transformed class RenameFields: def __init__(self,", "compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps) transformed = 
dict(inputs) transformed[\"stft\"] =", "__init__(self): pass def __call__(self, dataset, **inputs): audio, sr = read_audio(inputs[\"filename\"])", "name if name == \"stft\": n_fft, hop_size = args self.n_fft", "with params:\\n\", \"n_fft: {}, hop_size: {}\".format( n_fft, hop_size ) )", "= np.transpose(stft) return transformed class AudioFeatures: eps = 1e-4 def", "= 0.0 if verbose: print( \"\\nUsing STFT features with params:\\n\",", "if np.random.uniform() < self.p: transformed[\"audio\"] = shuffle_audio( transformed[\"audio\"], self.chunk_length, sr=transformed[\"sr\"])", "transforms\"\"\" pass class MapLabels: def __init__(self, class_map, drop_raw=True): self.class_map =", "log=True # ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type ==", "= dict(inputs) transformed[\"labels\"] = labels transformed.pop(\"raw_labels\") return transformed class MixUp(Augmentation):", "STFT: eps = 1e-4 def __init__(self, n_fft, hop_size): self.n_fft =", "eps = 1e-4 def __init__(self, descriptor, verbose=True): name, *args =", "transformed class MixUp(Augmentation): def __init__(self, p): self.p = p def", "self.hop_size = int(hop_size) self.n_mel = int(n_mel) self.n_features = self.n_mel self.padding_value", "= area self.p = p def __call__(self, dataset, **inputs): transformed", "return transform(**inputs) class DropFields: def __init__(self, fields): self.to_drop = fields", "__call__(self, dataset, **inputs): transformed = dict(inputs) if np.random.uniform() < self.p:", "verbose: print( \"\\nUsing mel features with params:\\n\", \"n_fft: {}, hop_size:", "mix_audio_and_labels, shuffle_audio, cutout ) SAMPLE_RATE = 44100 class Augmentation: \"\"\"A", "name, *args = descriptor.split(\"_\") self.feature_type = name if name ==", "self.p = p def __call__(self, dataset, **inputs): transformed = dict(inputs)", "# inputs[\"audio\"], # window_size=self.n_fft, hop_size=self.hop_size, # eps=self.eps, log=True # )", "effects_chain = ( 
pysndfx.AudioEffectsChain() .reverb( reverberance=random.randrange(50), room_scale=random.randrange(50), stereo_depth=random.randrange(50) ) .pitch(shift=random.randrange(-300,", "np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type == \"mel\": stft = compute_stft( inputs[\"audio\"],", "first_labels = inputs[\"audio\"], inputs[\"labels\"] random_sample = dataset.random_clean_sample() new_audio, new_labels =", "hop_size=self.hop_size, # eps=self.eps, log=True # ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1)", "self.feature_type == \"raw\": transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) return transformed class", "new_labels return transformed class FlipAudio(Augmentation): def __init__(self, p): self.p =", "= ( pysndfx.AudioEffectsChain() .reverb( reverberance=random.randrange(50), room_scale=random.randrange(50), stereo_depth=random.randrange(50) ) .pitch(shift=random.randrange(-300, 300))", "transformed[\"audio\"], self.area) return transformed class SampleLongAudio: def __init__(self, max_length): self.max_length", "__init__(self, max_length): self.max_length = max_length def __call__(self, dataset, **inputs): transformed", "random.choice(self.transforms) return transform(**inputs) class DropFields: def __init__(self, fields): self.to_drop =", "// 2 + 1 self.padding_value = 0.0 if verbose: print(", ") ) elif name == \"raw\": self.n_features = 1 self.padding_value", "transformed = dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] = cutout(", "= np.random.randint(0, inputs[\"audio\"].size - max_length) transformed[\"audio\"] = inputs[\"audio\"][start:start+max_length] return transformed", "input in inputs.items(): if not name in self.to_drop: transformed[name] =", "transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type == \"mel\": stft =", ") SAMPLE_RATE = 44100 class Augmentation: \"\"\"A base class for", "= new_audio transformed[\"labels\"] = new_labels return 
transformed class FlipAudio(Augmentation): def", "< self.p: effects_chain = ( pysndfx.AudioEffectsChain() .reverb( reverberance=random.randrange(50), room_scale=random.randrange(50), stereo_depth=random.randrange(50)", "dict(inputs) if np.random.uniform() < self.p: original_size = inputs[\"audio\"].size target_size =", "self.hop_size = int(hop_size) self.n_features = self.n_fft // 2 + 1", "ShuffleAudio(Augmentation): def __init__(self, chunk_length=0.5, p=0.5): self.chunk_length = chunk_length self.p =", "inputs[\"audio\"], # window_size=self.n_fft, hop_size=self.hop_size, # eps=self.eps, log=True # ) transformed[\"signal\"]", "== \"stft\": # stft = compute_stft( # inputs[\"audio\"], # window_size=self.n_fft,", "self.n_features = self.n_mel self.padding_value = 0.0 if verbose: print( \"\\nUsing", "dataset, **inputs): stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps) transformed", "dataset, **inputs): audio, sr = read_audio(inputs[\"filename\"]) transformed = dict(inputs) transformed[\"audio\"]", "self.p: transformed[\"audio\"] = shuffle_audio( transformed[\"audio\"], self.chunk_length, sr=transformed[\"sr\"]) return transformed class", "first_audio, random_sample[\"audio\"], first_labels, random_sample[\"labels\"] ) transformed[\"audio\"] = new_audio transformed[\"labels\"] =", "transformed class RenameFields: def __init__(self, mapping): self.mapping = mapping def", "# window_size=self.n_fft, hop_size=self.hop_size, # eps=self.eps, log=True # ) transformed[\"signal\"] =", ") elif name == \"raw\": self.n_features = 1 self.padding_value =", "p def __call__(self, dataset, **inputs): transformed = dict(inputs) if np.random.uniform()", "{}, hop_size: {}, n_mel: {}\".format( n_fft, hop_size, n_mel ) )", "class FlipAudio(Augmentation): def __init__(self, p): self.p = p def __call__(self,", "if np.random.uniform() < self.p: transformed[\"audio\"] = np.flipud(inputs[\"audio\"]) return transformed class", "hop_size): 
self.n_fft = n_fft self.hop_size = hop_size def __call__(self, dataset,", "transformed = dict(inputs) if self.feature_type == \"stft\": # stft =", "self.feature_type == \"stft\": # stft = compute_stft( # inputs[\"audio\"], #", "input return transformed class RenameFields: def __init__(self, mapping): self.mapping =", "target_size = int(np.random.uniform(self.min, self.max) * original_size) start = np.random.randint(original_size -", "self.transforms = transforms def switch_off_augmentations(self): for t in self.transforms: if", "\"raw\": transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) return transformed class SampleSegment(Augmentation): def", "self.mapping.items(): transformed[new] = transformed.pop(old) return transformed class Compose: def __init__(self,", "dict(inputs) if np.random.uniform() < self.p: first_audio, first_labels = inputs[\"audio\"], inputs[\"labels\"]", "name == \"mel\": n_fft, hop_size, n_mel = args self.n_fft =", ".speed(random.uniform(0.9, 1.1)) ) transformed[\"audio\"] = effects_chain(inputs[\"audio\"]) return transformed class LoadAudio:", "switch_off_augmentations(self): for t in self.transforms: if isinstance(t, Augmentation): t.p =", "functools import partial import json import pysndfx import librosa import", "shuffle_audio, cutout ) SAMPLE_RATE = 44100 class Augmentation: \"\"\"A base", "# ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type == \"mel\":", "inputs[\"audio\"], inputs[\"labels\"] random_sample = dataset.random_clean_sample() new_audio, new_labels = mix_audio_and_labels( first_audio,", "n_mel: {}\".format( n_fft, hop_size, n_mel ) ) elif name ==", "__init__(self, fields): self.to_drop = fields def __call__(self, dataset, **inputs): transformed", "int(hop_size) self.n_mel = int(n_mel) self.n_features = self.n_mel self.padding_value = 0.0", "__call__(self, dataset, **inputs): transformed = dict(inputs) if self.feature_type == \"stft\":", "= np.expand_dims(inputs[\"audio\"], 
-1) elif self.feature_type == \"raw\": transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"],", "compute_stft( # inputs[\"audio\"], # window_size=self.n_fft, hop_size=self.hop_size, # eps=self.eps, log=True #", "= mix_audio_and_labels( first_audio, random_sample[\"audio\"], first_labels, random_sample[\"labels\"] ) transformed[\"audio\"] = new_audio", "transformed[\"audio\"] = cutout( transformed[\"audio\"], self.area) return transformed class SampleLongAudio: def", "dict(inputs) transformed[\"labels\"] = labels transformed.pop(\"raw_labels\") return transformed class MixUp(Augmentation): def", "0.0 def __call__(self, dataset=None, **inputs): for t in self.transforms: inputs", "first_audio, first_labels = inputs[\"audio\"], inputs[\"labels\"] random_sample = dataset.random_clean_sample() new_audio, new_labels", "- 1) transformed[\"audio\"] = inputs[\"audio\"][start:start+target_size] return transformed class ShuffleAudio(Augmentation): def", "features with params:\\n\", \"n_fft: {}, hop_size: {}\".format( n_fft, hop_size )", "self.n_features = self.n_fft // 2 + 1 self.padding_value = 0.0", "self.max_length = max_length def __call__(self, dataset, **inputs): transformed = dict(inputs)", "= dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] = cutout( transformed[\"audio\"],", "return transformed class ShuffleAudio(Augmentation): def __init__(self, chunk_length=0.5, p=0.5): self.chunk_length =", "max_length) transformed[\"audio\"] = inputs[\"audio\"][start:start+max_length] return transformed class OneOf: def __init__(self,", "= hop_size def __call__(self, dataset, **inputs): stft = compute_stft( inputs[\"audio\"],", "hop_size def __call__(self, dataset, **inputs): stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft,", "= p def __call__(self, dataset, **inputs): transformed = dict(inputs) if", "first_labels, random_sample[\"labels\"] ) transformed[\"audio\"] = new_audio transformed[\"labels\"] = new_labels return", "dataset, 
**inputs): transformed = dict(inputs) if np.random.uniform() < self.p: first_audio,", "class SampleLongAudio: def __init__(self, max_length): self.max_length = max_length def __call__(self,", "print( \"\\nUsing raw waveform features.\" ) def __call__(self, dataset, **inputs):", ") def __call__(self, dataset, **inputs): transformed = dict(inputs) if self.feature_type", "eps=self.eps, log=True # ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type", "self.padding_value = 0.0 if verbose: print( \"\\nUsing STFT features with", "def __call__(self, dataset, **inputs): transformed = dict(inputs) if np.random.uniform() <", "dataset, **inputs): transformed = dict(inputs) for old, new in self.mapping.items():", "transformed[\"audio\"] = shuffle_audio( transformed[\"audio\"], self.chunk_length, sr=transformed[\"sr\"]) return transformed class CutOut(Augmentation):", "c in inputs[\"raw_labels\"]: labels[self.class_map[c]] = 1.0 transformed = dict(inputs) transformed[\"labels\"]", "n_fft, hop_size, n_mel ) ) elif name == \"raw\": self.n_features", "def __init__(self, transforms): self.transforms = transforms def __call__(self, dataset, **inputs):", "**inputs): audio, sr = read_audio(inputs[\"filename\"]) transformed = dict(inputs) transformed[\"audio\"] =", "class RenameFields: def __init__(self, mapping): self.mapping = mapping def __call__(self,", "dict(inputs) if np.random.uniform() < self.p: effects_chain = ( pysndfx.AudioEffectsChain() .reverb(", "int(np.random.uniform(self.min, self.max) * original_size) start = np.random.randint(original_size - target_size -", "Augmentation): t.p = 0.0 def __call__(self, dataset=None, **inputs): for t", "fields def __call__(self, dataset, **inputs): transformed = dict() for name,", "__call__(self, dataset, **inputs): audio, sr = read_audio(inputs[\"filename\"]) transformed = dict(inputs)", "1 self.padding_value = 0.0 if verbose: print( \"\\nUsing STFT features", "transformed[\"signal\"] = 
np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type == \"raw\": transformed[\"signal\"] =", "self.n_fft = int(n_fft) self.hop_size = int(hop_size) self.n_features = self.n_fft //", "Augmentation: \"\"\"A base class for data augmentation transforms\"\"\" pass class", "def __init__(self, chunk_length=0.5, p=0.5): self.chunk_length = chunk_length self.p = p", "transformed[\"labels\"] = new_labels return transformed class FlipAudio(Augmentation): def __init__(self, p):", "def __call__(self, dataset, **inputs): labels = np.zeros(len(self.class_map), dtype=np.float32) for c", "if verbose: print( \"\\nUsing mel features with params:\\n\", \"n_fft: {},", "- max_length) transformed[\"audio\"] = inputs[\"audio\"][start:start+max_length] return transformed class OneOf: def", "area=0.25, p=0.5): self.area = area self.p = p def __call__(self,", "__init__(self, mapping): self.mapping = mapping def __call__(self, dataset, **inputs): transformed", "dataset, **inputs): transformed = dict(inputs) if np.random.uniform() < self.p: effects_chain", "transformed class Compose: def __init__(self, transforms): self.transforms = transforms def", "np.random.uniform() < self.p: original_size = inputs[\"audio\"].size target_size = int(np.random.uniform(self.min, self.max)", "def __call__(self, dataset, **inputs): transformed = dict(inputs) if self.feature_type ==", "dict(inputs) transformed[\"audio\"] = audio transformed[\"sr\"] = sr return transformed class", "if isinstance(t, Augmentation): t.p = 0.0 def __call__(self, dataset=None, **inputs):", "self.p: transformed[\"audio\"] = np.flipud(inputs[\"audio\"]) return transformed class AudioAugmentation(Augmentation): def __init__(self,", "audio, sr = read_audio(inputs[\"filename\"]) transformed = dict(inputs) transformed[\"audio\"] = audio", "{}, n_mel: {}\".format( n_fft, hop_size, n_mel ) ) elif name", "new_audio transformed[\"labels\"] = new_labels return transformed class FlipAudio(Augmentation): def __init__(self,", "def 
__init__(self, mapping): self.mapping = mapping def __call__(self, dataset, **inputs):", "STFT features with params:\\n\", \"n_fft: {}, hop_size: {}\".format( n_fft, hop_size", "inputs[\"sr\"]) > self.max_length: max_length = self.max_length * inputs[\"sr\"] start =", "= inputs[\"audio\"].size target_size = int(np.random.uniform(self.min, self.max) * original_size) start =", "self.max_length * inputs[\"sr\"] start = np.random.randint(0, inputs[\"audio\"].size - max_length) transformed[\"audio\"]", "hop_size=self.hop_size, eps=self.eps, log=False ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type", "original_size) start = np.random.randint(original_size - target_size - 1) transformed[\"audio\"] =", "= args self.n_fft = int(n_fft) self.hop_size = int(hop_size) self.n_mel =", "int(n_fft) self.hop_size = int(hop_size) self.n_mel = int(n_mel) self.n_features = self.n_mel", "verbose=True): name, *args = descriptor.split(\"_\") self.feature_type = name if name", "from functools import partial import json import pysndfx import librosa", "if verbose: print( \"\\nUsing raw waveform features.\" ) def __call__(self,", "return transformed class AudioAugmentation(Augmentation): def __init__(self, p): self.p = p", "self.to_drop = fields def __call__(self, dataset, **inputs): transformed = dict()", "# eps=self.eps, log=True # ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif", "name in self.to_drop: transformed[name] = input return transformed class RenameFields:", "\"\\nUsing raw waveform features.\" ) def __call__(self, dataset, **inputs): transformed", ") transformed[\"audio\"] = new_audio transformed[\"labels\"] = new_labels return transformed class", "def __init__(self, max_length): self.max_length = max_length def __call__(self, dataset, **inputs):", "return transformed class SampleLongAudio: def __init__(self, max_length): self.max_length = max_length", "descriptor.split(\"_\") self.feature_type = name if name == 
\"stft\": n_fft, hop_size", "ratio=(0.3, 0.9), p=1.0): self.min, self.max = ratio self.p = p", "/ inputs[\"sr\"]) > self.max_length: max_length = self.max_length * inputs[\"sr\"] start", ".pitch(shift=random.randrange(-300, 300)) .overdrive(gain=random.randrange(2, 10)) .speed(random.uniform(0.9, 1.1)) ) transformed[\"audio\"] = effects_chain(inputs[\"audio\"])", "inputs[\"audio\"].size target_size = int(np.random.uniform(self.min, self.max) * original_size) start = np.random.randint(original_size", "mapping): self.mapping = mapping def __call__(self, dataset, **inputs): transformed =", "= compute_stft( # inputs[\"audio\"], # window_size=self.n_fft, hop_size=self.hop_size, # eps=self.eps, log=True", "stft = compute_stft( # inputs[\"audio\"], # window_size=self.n_fft, hop_size=self.hop_size, # eps=self.eps,", "= t(dataset=dataset, **inputs) return inputs class Identity: def __call__(self, dataset=None,", "dataset, **inputs): transformed = dict() for name, input in inputs.items():", "**inputs) return inputs class Identity: def __call__(self, dataset=None, **inputs): return", "**inputs): transformed = dict(inputs) if self.feature_type == \"stft\": # stft", "if np.random.uniform() < self.p: first_audio, first_labels = inputs[\"audio\"], inputs[\"labels\"] random_sample", "waveform features.\" ) def __call__(self, dataset, **inputs): transformed = dict(inputs)", "self.padding_value = 0.0 if verbose: print( \"\\nUsing mel features with", "augmentation transforms\"\"\" pass class MapLabels: def __init__(self, class_map, drop_raw=True): self.class_map", "Compose: def __init__(self, transforms): self.transforms = transforms def switch_off_augmentations(self): for", "return transformed class OneOf: def __init__(self, transforms): self.transforms = transforms", "dict(inputs) if self.feature_type == \"stft\": # stft = compute_stft( #", "= 0.0 def __call__(self, dataset=None, **inputs): for t in self.transforms:", "= args self.n_fft = int(n_fft) self.hop_size = int(hop_size) 
self.n_features =", "window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps, log=False ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif", "n_fft, hop_size, n_mel = args self.n_fft = int(n_fft) self.hop_size =", "chunk_length=0.5, p=0.5): self.chunk_length = chunk_length self.p = p def __call__(self,", "= compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps) transformed = dict(inputs) transformed[\"stft\"]", "features with params:\\n\", \"n_fft: {}, hop_size: {}, n_mel: {}\".format( n_fft,", "data augmentation transforms\"\"\" pass class MapLabels: def __init__(self, class_map, drop_raw=True):", "log=False ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type == \"raw\":", "dataset, **inputs): transformed = dict(inputs) if np.random.uniform() < self.p: original_size", "mix_audio_and_labels( first_audio, random_sample[\"audio\"], first_labels, random_sample[\"labels\"] ) transformed[\"audio\"] = new_audio transformed[\"labels\"]", "elif name == \"mel\": n_fft, hop_size, n_mel = args self.n_fft", "transformed = dict() for name, input in inputs.items(): if not", "p): self.p = p def __call__(self, dataset, **inputs): transformed =", "= inputs[\"audio\"][start:start+target_size] return transformed class ShuffleAudio(Augmentation): def __init__(self, chunk_length=0.5, p=0.5):", "__init__(self, transforms): self.transforms = transforms def __call__(self, dataset, **inputs): transform", "max_length = self.max_length * inputs[\"sr\"] start = np.random.randint(0, inputs[\"audio\"].size -", "cutout ) SAMPLE_RATE = 44100 class Augmentation: \"\"\"A base class", "fields): self.to_drop = fields def __call__(self, dataset, **inputs): transformed =", "stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps) transformed = dict(inputs)", "= shuffle_audio( transformed[\"audio\"], self.chunk_length, sr=transformed[\"sr\"]) return 
transformed class CutOut(Augmentation): def", "np.random.uniform() < self.p: effects_chain = ( pysndfx.AudioEffectsChain() .reverb( reverberance=random.randrange(50), room_scale=random.randrange(50),", "class for data augmentation transforms\"\"\" pass class MapLabels: def __init__(self,", "RenameFields: def __init__(self, mapping): self.mapping = mapping def __call__(self, dataset,", "random_sample[\"labels\"] ) transformed[\"audio\"] = new_audio transformed[\"labels\"] = new_labels return transformed", "< self.p: original_size = inputs[\"audio\"].size target_size = int(np.random.uniform(self.min, self.max) *", "eps=self.eps, log=False ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type ==", "= dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] = np.flipud(inputs[\"audio\"]) return", "transformed[\"audio\"] = new_audio transformed[\"labels\"] = new_labels return transformed class FlipAudio(Augmentation):", "= random.choice(self.transforms) return transform(**inputs) class DropFields: def __init__(self, fields): self.to_drop", "torch from ops.audio import ( read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio,", "dict(inputs) transformed[\"stft\"] = np.transpose(stft) return transformed class AudioFeatures: eps =", "def __init__(self): pass def __call__(self, dataset, **inputs): audio, sr =", "AudioAugmentation(Augmentation): def __init__(self, p): self.p = p def __call__(self, dataset,", "self.p: effects_chain = ( pysndfx.AudioEffectsChain() .reverb( reverberance=random.randrange(50), room_scale=random.randrange(50), stereo_depth=random.randrange(50) )", "__call__(self, dataset, **inputs): transformed = dict(inputs) for old, new in", "pysndfx.AudioEffectsChain() .reverb( reverberance=random.randrange(50), room_scale=random.randrange(50), stereo_depth=random.randrange(50) ) .pitch(shift=random.randrange(-300, 300)) .overdrive(gain=random.randrange(2, 10))", "= dict(inputs) if 
(inputs[\"audio\"].size / inputs[\"sr\"]) > self.max_length: max_length =", "**inputs): transformed = dict(inputs) for old, new in self.mapping.items(): transformed[new]", "transformed[\"stft\"] = np.transpose(stft) return transformed class AudioFeatures: eps = 1e-4", "if np.random.uniform() < self.p: effects_chain = ( pysndfx.AudioEffectsChain() .reverb( reverberance=random.randrange(50),", "transformed class STFT: eps = 1e-4 def __init__(self, n_fft, hop_size):", "if not name in self.to_drop: transformed[name] = input return transformed", "dataset=None, **inputs): for t in self.transforms: inputs = t(dataset=dataset, **inputs)", "for t in self.transforms: inputs = t(dataset=dataset, **inputs) return inputs", "dtype=np.float32) for c in inputs[\"raw_labels\"]: labels[self.class_map[c]] = 1.0 transformed =", "inputs[\"audio\"].size - max_length) transformed[\"audio\"] = inputs[\"audio\"][start:start+max_length] return transformed class OneOf:", "__init__(self, transforms): self.transforms = transforms def switch_off_augmentations(self): for t in", "self.feature_type == \"mel\": stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps,", "self.area) return transformed class SampleLongAudio: def __init__(self, max_length): self.max_length =", "-1) elif self.feature_type == \"raw\": transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) return", "\"n_fft: {}, hop_size: {}\".format( n_fft, hop_size ) ) elif name", "= chunk_length self.p = p def __call__(self, dataset, **inputs): transformed", "t in self.transforms: if isinstance(t, Augmentation): t.p = 0.0 def", "in self.transforms: inputs = t(dataset=dataset, **inputs) return inputs class Identity:", "= np.flipud(inputs[\"audio\"]) return transformed class AudioAugmentation(Augmentation): def __init__(self, p): self.p", "np.random.randint(original_size - target_size - 1) transformed[\"audio\"] = inputs[\"audio\"][start:start+target_size] return transformed", 
"__call__(self, dataset, **inputs): transformed = dict() for name, input in", "elif name == \"raw\": self.n_features = 1 self.padding_value = 0.0", "= dict(inputs) if np.random.uniform() < self.p: first_audio, first_labels = inputs[\"audio\"],", "300)) .overdrive(gain=random.randrange(2, 10)) .speed(random.uniform(0.9, 1.1)) ) transformed[\"audio\"] = effects_chain(inputs[\"audio\"]) return", "inputs[\"audio\"][start:start+max_length] return transformed class OneOf: def __init__(self, transforms): self.transforms =", "= self.n_mel self.padding_value = 0.0 if verbose: print( \"\\nUsing mel", "base class for data augmentation transforms\"\"\" pass class MapLabels: def", "self.class_map = class_map def __call__(self, dataset, **inputs): labels = np.zeros(len(self.class_map),", ") ) elif name == \"mel\": n_fft, hop_size, n_mel =", "= dataset.random_clean_sample() new_audio, new_labels = mix_audio_and_labels( first_audio, random_sample[\"audio\"], first_labels, random_sample[\"labels\"]", "{}, hop_size: {}\".format( n_fft, hop_size ) ) elif name ==", "1 self.padding_value = 0.0 if verbose: print( \"\\nUsing raw waveform", "import math from functools import partial import json import pysndfx", "import partial import json import pysndfx import librosa import numpy", "inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps, log=False ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1)", ") transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type == \"mel\": stft", "def __init__(self, transforms): self.transforms = transforms def switch_off_augmentations(self): for t", "def __call__(self, dataset, **inputs): transformed = dict(inputs) if (inputs[\"audio\"].size /", "dataset.random_clean_sample() new_audio, new_labels = mix_audio_and_labels( first_audio, random_sample[\"audio\"], first_labels, random_sample[\"labels\"] )", "< self.p: transformed[\"audio\"] = np.flipud(inputs[\"audio\"]) return transformed 
class AudioAugmentation(Augmentation): def", "= cutout( transformed[\"audio\"], self.area) return transformed class SampleLongAudio: def __init__(self,", "class AudioFeatures: eps = 1e-4 def __init__(self, descriptor, verbose=True): name,", "= np.expand_dims(inputs[\"audio\"], -1) return transformed class SampleSegment(Augmentation): def __init__(self, ratio=(0.3,", "**inputs): labels = np.zeros(len(self.class_map), dtype=np.float32) for c in inputs[\"raw_labels\"]: labels[self.class_map[c]]", "self.area = area self.p = p def __call__(self, dataset, **inputs):", "t(dataset=dataset, **inputs) return inputs class Identity: def __call__(self, dataset=None, **inputs):", "dataset, **inputs): transformed = dict(inputs) if self.feature_type == \"stft\": #", "transformed = dict(inputs) if (inputs[\"audio\"].size / inputs[\"sr\"]) > self.max_length: max_length", "*args = descriptor.split(\"_\") self.feature_type = name if name == \"stft\":", "**inputs): stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps) transformed =", "transformed[\"audio\"] = audio transformed[\"sr\"] = sr return transformed class STFT:", "hop_size: {}\".format( n_fft, hop_size ) ) elif name == \"mel\":", "ratio self.p = p def __call__(self, dataset, **inputs): transformed =", "not name in self.to_drop: transformed[name] = input return transformed class", "labels[self.class_map[c]] = 1.0 transformed = dict(inputs) transformed[\"labels\"] = labels transformed.pop(\"raw_labels\")", "dict(inputs) for old, new in self.mapping.items(): transformed[new] = transformed.pop(old) return", "with params:\\n\", \"n_fft: {}, hop_size: {}, n_mel: {}\".format( n_fft, hop_size,", "np.random.uniform() < self.p: transformed[\"audio\"] = np.flipud(inputs[\"audio\"]) return transformed class AudioAugmentation(Augmentation):", "transformed = dict(inputs) for old, new in self.mapping.items(): transformed[new] =", "< self.p: transformed[\"audio\"] = shuffle_audio( 
transformed[\"audio\"], self.chunk_length, sr=transformed[\"sr\"]) return transformed", "n_fft self.hop_size = hop_size def __call__(self, dataset, **inputs): stft =", "shuffle_audio( transformed[\"audio\"], self.chunk_length, sr=transformed[\"sr\"]) return transformed class CutOut(Augmentation): def __init__(self,", "transforms): self.transforms = transforms def __call__(self, dataset, **inputs): transform =", "def __call__(self, dataset, **inputs): transformed = dict(inputs) for old, new", "def __init__(self, n_fft, hop_size): self.n_fft = n_fft self.hop_size = hop_size", "def __call__(self, dataset, **inputs): stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size,", "transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) return transformed class SampleSegment(Augmentation): def __init__(self,", "1.1)) ) transformed[\"audio\"] = effects_chain(inputs[\"audio\"]) return transformed class LoadAudio: def", "self.feature_type = name if name == \"stft\": n_fft, hop_size =", "n_fft, hop_size = args self.n_fft = int(n_fft) self.hop_size = int(hop_size)", ") transformed[\"audio\"] = effects_chain(inputs[\"audio\"]) return transformed class LoadAudio: def __init__(self):", "== \"mel\": n_fft, hop_size, n_mel = args self.n_fft = int(n_fft)", "< self.p: first_audio, first_labels = inputs[\"audio\"], inputs[\"labels\"] random_sample = dataset.random_clean_sample()", "transformed[new] = transformed.pop(old) return transformed class Compose: def __init__(self, transforms):", "if np.random.uniform() < self.p: original_size = inputs[\"audio\"].size target_size = int(np.random.uniform(self.min,", "class_map def __call__(self, dataset, **inputs): labels = np.zeros(len(self.class_map), dtype=np.float32) for", "hop_size=self.hop_size, eps=self.eps) transformed = dict(inputs) transformed[\"stft\"] = np.transpose(stft) return transformed", "__init__(self, area=0.25, p=0.5): self.area = area self.p = p def", "elif self.feature_type == \"mel\": 
stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size,", "return inputs class Identity: def __call__(self, dataset=None, **inputs): return inputs", "ops.audio import ( read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout )", "read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout ) SAMPLE_RATE = 44100", "import ( read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout ) SAMPLE_RATE", "dataset, **inputs): transformed = dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"]", "{}\".format( n_fft, hop_size, n_mel ) ) elif name == \"raw\":", "inputs[\"labels\"] random_sample = dataset.random_clean_sample() new_audio, new_labels = mix_audio_and_labels( first_audio, random_sample[\"audio\"],", "= dict(inputs) if np.random.uniform() < self.p: original_size = inputs[\"audio\"].size target_size", "transformed = dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] = np.flipud(inputs[\"audio\"])", "print( \"\\nUsing mel features with params:\\n\", \"n_fft: {}, hop_size: {},", "\"mel\": n_fft, hop_size, n_mel = args self.n_fft = int(n_fft) self.hop_size", "for old, new in self.mapping.items(): transformed[new] = transformed.pop(old) return transformed", "1e-4 def __init__(self, n_fft, hop_size): self.n_fft = n_fft self.hop_size =", "\"mel\": stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps, log=False )", "= read_audio(inputs[\"filename\"]) transformed = dict(inputs) transformed[\"audio\"] = audio transformed[\"sr\"] =", "class LoadAudio: def __init__(self): pass def __call__(self, dataset, **inputs): audio,", "transforms def switch_off_augmentations(self): for t in self.transforms: if isinstance(t, Augmentation):", "return transformed class AudioFeatures: eps = 1e-4 def __init__(self, descriptor,", "= ratio self.p = p def __call__(self, dataset, **inputs): transformed", "in 
inputs[\"raw_labels\"]: labels[self.class_map[c]] = 1.0 transformed = dict(inputs) transformed[\"labels\"] =", "return transformed class FlipAudio(Augmentation): def __init__(self, p): self.p = p", "= name if name == \"stft\": n_fft, hop_size = args", "inputs.items(): if not name in self.to_drop: transformed[name] = input return", "transformed = dict(inputs) if np.random.uniform() < self.p: original_size = inputs[\"audio\"].size", "new_audio, new_labels = mix_audio_and_labels( first_audio, random_sample[\"audio\"], first_labels, random_sample[\"labels\"] ) transformed[\"audio\"]", "dict(inputs) if (inputs[\"audio\"].size / inputs[\"sr\"]) > self.max_length: max_length = self.max_length", "- target_size - 1) transformed[\"audio\"] = inputs[\"audio\"][start:start+target_size] return transformed class", "class Compose: def __init__(self, transforms): self.transforms = transforms def switch_off_augmentations(self):", "self.max_length: max_length = self.max_length * inputs[\"sr\"] start = np.random.randint(0, inputs[\"audio\"].size", "int(n_fft) self.hop_size = int(hop_size) self.n_features = self.n_fft // 2 +", "= new_labels return transformed class FlipAudio(Augmentation): def __init__(self, p): self.p", "\"n_fft: {}, hop_size: {}, n_mel: {}\".format( n_fft, hop_size, n_mel )", "transformed = dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] = shuffle_audio(", "import pysndfx import librosa import numpy as np import torch", "transformed[\"sr\"] = sr return transformed class STFT: eps = 1e-4", "= int(hop_size) self.n_mel = int(n_mel) self.n_features = self.n_mel self.padding_value =", "hop_size = args self.n_fft = int(n_fft) self.hop_size = int(hop_size) self.n_features", "def __call__(self, dataset=None, **inputs): for t in self.transforms: inputs =", "self.n_fft // 2 + 1 self.padding_value = 0.0 if verbose:", "= np.random.randint(original_size - target_size - 1) transformed[\"audio\"] = inputs[\"audio\"][start:start+target_size] return", "import 
numpy as np import torch from ops.audio import (", "descriptor, verbose=True): name, *args = descriptor.split(\"_\") self.feature_type = name if", "**inputs): for t in self.transforms: inputs = t(dataset=dataset, **inputs) return", "return transformed class RenameFields: def __init__(self, mapping): self.mapping = mapping", "= 44100 class Augmentation: \"\"\"A base class for data augmentation", "return transformed class STFT: eps = 1e-4 def __init__(self, n_fft,", "**inputs): transform = random.choice(self.transforms) return transform(**inputs) class DropFields: def __init__(self,", "self.p: first_audio, first_labels = inputs[\"audio\"], inputs[\"labels\"] random_sample = dataset.random_clean_sample() new_audio,", "2 + 1 self.padding_value = 0.0 if verbose: print( \"\\nUsing", "\"raw\": self.n_features = 1 self.padding_value = 0.0 if verbose: print(", "n_mel ) ) elif name == \"raw\": self.n_features = 1", "int(hop_size) self.n_features = self.n_fft // 2 + 1 self.padding_value =", "1) transformed[\"audio\"] = inputs[\"audio\"][start:start+target_size] return transformed class ShuffleAudio(Augmentation): def __init__(self,", "labels = np.zeros(len(self.class_map), dtype=np.float32) for c in inputs[\"raw_labels\"]: labels[self.class_map[c]] =", "__init__(self, chunk_length=0.5, p=0.5): self.chunk_length = chunk_length self.p = p def", "transformed.pop(old) return transformed class Compose: def __init__(self, transforms): self.transforms =", "as np import torch from ops.audio import ( read_audio, compute_stft,", "\"stft\": # stft = compute_stft( # inputs[\"audio\"], # window_size=self.n_fft, hop_size=self.hop_size,", ") transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type == \"raw\": transformed[\"signal\"]", "0.0 if verbose: print( \"\\nUsing mel features with params:\\n\", \"n_fft:", "{}\".format( n_fft, hop_size ) ) elif name == \"mel\": n_fft,", "transform = random.choice(self.transforms) return transform(**inputs) class 
DropFields: def __init__(self, fields):", "+ 1 self.padding_value = 0.0 if verbose: print( \"\\nUsing STFT", "transformed = dict(inputs) if np.random.uniform() < self.p: first_audio, first_labels =", "pass class MapLabels: def __init__(self, class_map, drop_raw=True): self.class_map = class_map", "self.p: transformed[\"audio\"] = cutout( transformed[\"audio\"], self.area) return transformed class SampleLongAudio:", "if (inputs[\"audio\"].size / inputs[\"sr\"]) > self.max_length: max_length = self.max_length *", "t in self.transforms: inputs = t(dataset=dataset, **inputs) return inputs class", "__init__(self, ratio=(0.3, 0.9), p=1.0): self.min, self.max = ratio self.p =", "= max_length def __call__(self, dataset, **inputs): transformed = dict(inputs) if", "sr return transformed class STFT: eps = 1e-4 def __init__(self,", "transformed class AudioAugmentation(Augmentation): def __init__(self, p): self.p = p def", "dataset, **inputs): transformed = dict(inputs) if (inputs[\"audio\"].size / inputs[\"sr\"]) >", "= inputs[\"audio\"], inputs[\"labels\"] random_sample = dataset.random_clean_sample() new_audio, new_labels = mix_audio_and_labels(", "= np.zeros(len(self.class_map), dtype=np.float32) for c in inputs[\"raw_labels\"]: labels[self.class_map[c]] = 1.0", "if name == \"stft\": n_fft, hop_size = args self.n_fft =", "hop_size, n_mel ) ) elif name == \"raw\": self.n_features =", "self.max = ratio self.p = p def __call__(self, dataset, **inputs):", "def __init__(self, fields): self.to_drop = fields def __call__(self, dataset, **inputs):", "transformed class FlipAudio(Augmentation): def __init__(self, p): self.p = p def", "== \"raw\": transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) return transformed class SampleSegment(Augmentation):", "elif self.feature_type == \"raw\": transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"], -1) return transformed", "read_audio(inputs[\"filename\"]) transformed = dict(inputs) transformed[\"audio\"] = audio 
transformed[\"sr\"] = sr", "transformed class SampleSegment(Augmentation): def __init__(self, ratio=(0.3, 0.9), p=1.0): self.min, self.max", "= inputs[\"audio\"][start:start+max_length] return transformed class OneOf: def __init__(self, transforms): self.transforms", "= dict(inputs) for old, new in self.mapping.items(): transformed[new] = transformed.pop(old)", "cutout( transformed[\"audio\"], self.area) return transformed class SampleLongAudio: def __init__(self, max_length):", "if verbose: print( \"\\nUsing STFT features with params:\\n\", \"n_fft: {},", "OneOf: def __init__(self, transforms): self.transforms = transforms def __call__(self, dataset,", "= dict(inputs) if self.feature_type == \"stft\": # stft = compute_stft(", "transformed class OneOf: def __init__(self, transforms): self.transforms = transforms def", "1e-4 def __init__(self, descriptor, verbose=True): name, *args = descriptor.split(\"_\") self.feature_type", "* inputs[\"sr\"] start = np.random.randint(0, inputs[\"audio\"].size - max_length) transformed[\"audio\"] =", "(inputs[\"audio\"].size / inputs[\"sr\"]) > self.max_length: max_length = self.max_length * inputs[\"sr\"]", "from ops.audio import ( read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout", "for data augmentation transforms\"\"\" pass class MapLabels: def __init__(self, class_map,", "= self.max_length * inputs[\"sr\"] start = np.random.randint(0, inputs[\"audio\"].size - max_length)", "np.flipud(inputs[\"audio\"]) return transformed class AudioAugmentation(Augmentation): def __init__(self, p): self.p =", "**inputs): transformed = dict(inputs) if (inputs[\"audio\"].size / inputs[\"sr\"]) > self.max_length:", "= n_fft self.hop_size = hop_size def __call__(self, dataset, **inputs): stft", "-1) elif self.feature_type == \"mel\": stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft,", "= 1e-4 def __init__(self, n_fft, hop_size): self.n_fft = n_fft self.hop_size", "self.max) * original_size) start = 
np.random.randint(original_size - target_size - 1)", "effects_chain(inputs[\"audio\"]) return transformed class LoadAudio: def __init__(self): pass def __call__(self,", "new_labels = mix_audio_and_labels( first_audio, random_sample[\"audio\"], first_labels, random_sample[\"labels\"] ) transformed[\"audio\"] =", "args self.n_fft = int(n_fft) self.hop_size = int(hop_size) self.n_features = self.n_fft", "**inputs): transformed = dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] =", "in inputs.items(): if not name in self.to_drop: transformed[name] = input", "old, new in self.mapping.items(): transformed[new] = transformed.pop(old) return transformed class", "inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps) transformed = dict(inputs) transformed[\"stft\"] = np.transpose(stft)", "= int(np.random.uniform(self.min, self.max) * original_size) start = np.random.randint(original_size - target_size", "self.chunk_length, sr=transformed[\"sr\"]) return transformed class CutOut(Augmentation): def __init__(self, area=0.25, p=0.5):", "0.9), p=1.0): self.min, self.max = ratio self.p = p def", "numpy as np import torch from ops.audio import ( read_audio,", "__call__(self, dataset, **inputs): labels = np.zeros(len(self.class_map), dtype=np.float32) for c in", "= 1 self.padding_value = 0.0 if verbose: print( \"\\nUsing raw", "class MixUp(Augmentation): def __init__(self, p): self.p = p def __call__(self,", "__init__(self, descriptor, verbose=True): name, *args = descriptor.split(\"_\") self.feature_type = name", "self.n_features = 1 self.padding_value = 0.0 if verbose: print( \"\\nUsing", "class Augmentation: \"\"\"A base class for data augmentation transforms\"\"\" pass", "__call__(self, dataset, **inputs): transformed = dict(inputs) if (inputs[\"audio\"].size / inputs[\"sr\"])", "= transforms def switch_off_augmentations(self): for t in self.transforms: if isinstance(t,", "random_sample = dataset.random_clean_sample() new_audio, 
new_labels = mix_audio_and_labels( first_audio, random_sample[\"audio\"], first_labels,", "dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] = np.flipud(inputs[\"audio\"]) return transformed", "reverberance=random.randrange(50), room_scale=random.randrange(50), stereo_depth=random.randrange(50) ) .pitch(shift=random.randrange(-300, 300)) .overdrive(gain=random.randrange(2, 10)) .speed(random.uniform(0.9, 1.1))", "max_length def __call__(self, dataset, **inputs): transformed = dict(inputs) if (inputs[\"audio\"].size", "start = np.random.randint(0, inputs[\"audio\"].size - max_length) transformed[\"audio\"] = inputs[\"audio\"][start:start+max_length] return", ".overdrive(gain=random.randrange(2, 10)) .speed(random.uniform(0.9, 1.1)) ) transformed[\"audio\"] = effects_chain(inputs[\"audio\"]) return transformed", "transformed[\"audio\"] = np.flipud(inputs[\"audio\"]) return transformed class AudioAugmentation(Augmentation): def __init__(self, p):", "== \"raw\": self.n_features = 1 self.padding_value = 0.0 if verbose:", "dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] = cutout( transformed[\"audio\"], self.area)", "class CutOut(Augmentation): def __init__(self, area=0.25, p=0.5): self.area = area self.p", "for name, input in inputs.items(): if not name in self.to_drop:", "__init__(self, class_map, drop_raw=True): self.class_map = class_map def __call__(self, dataset, **inputs):", "transformed class LoadAudio: def __init__(self): pass def __call__(self, dataset, **inputs):", "= sr return transformed class STFT: eps = 1e-4 def", "= np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type == \"mel\": stft = compute_stft(", "transformed = dict(inputs) if np.random.uniform() < self.p: effects_chain = (", "trim_audio, mix_audio_and_labels, shuffle_audio, cutout ) SAMPLE_RATE = 44100 class Augmentation:", "MapLabels: def __init__(self, class_map, drop_raw=True): self.class_map = class_map def __call__(self,", 
"inputs[\"audio\"][start:start+target_size] return transformed class ShuffleAudio(Augmentation): def __init__(self, chunk_length=0.5, p=0.5): self.chunk_length", "np import torch from ops.audio import ( read_audio, compute_stft, trim_audio,", "partial import json import pysndfx import librosa import numpy as", "self.padding_value = 0.0 if verbose: print( \"\\nUsing raw waveform features.\"", "transformed[\"audio\"] = effects_chain(inputs[\"audio\"]) return transformed class LoadAudio: def __init__(self): pass", "def __call__(self, dataset, **inputs): audio, sr = read_audio(inputs[\"filename\"]) transformed =", "transformed = dict(inputs) transformed[\"audio\"] = audio transformed[\"sr\"] = sr return", "transformed class CutOut(Augmentation): def __init__(self, area=0.25, p=0.5): self.area = area", "name, input in inputs.items(): if not name in self.to_drop: transformed[name]", "isinstance(t, Augmentation): t.p = 0.0 def __call__(self, dataset=None, **inputs): for", "params:\\n\", \"n_fft: {}, hop_size: {}\".format( n_fft, hop_size ) ) elif", "self.transforms: if isinstance(t, Augmentation): t.p = 0.0 def __call__(self, dataset=None,", ") elif name == \"mel\": n_fft, hop_size, n_mel = args", "eps = 1e-4 def __init__(self, n_fft, hop_size): self.n_fft = n_fft", "librosa import numpy as np import torch from ops.audio import", "def __init__(self, p): self.p = p def __call__(self, dataset, **inputs):", "== \"stft\": n_fft, hop_size = args self.n_fft = int(n_fft) self.hop_size", "44100 class Augmentation: \"\"\"A base class for data augmentation transforms\"\"\"", "drop_raw=True): self.class_map = class_map def __call__(self, dataset, **inputs): labels =", "= compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps, log=False ) transformed[\"signal\"] =", "verbose: print( \"\\nUsing raw waveform features.\" ) def __call__(self, dataset,", "np.expand_dims(inputs[\"audio\"], -1) elif self.feature_type == \"raw\": transformed[\"signal\"] 
= np.expand_dims(inputs[\"audio\"], -1)", "import random import math from functools import partial import json", "SampleSegment(Augmentation): def __init__(self, ratio=(0.3, 0.9), p=1.0): self.min, self.max = ratio", "transformed class SampleLongAudio: def __init__(self, max_length): self.max_length = max_length def", "= transforms def __call__(self, dataset, **inputs): transform = random.choice(self.transforms) return", "( pysndfx.AudioEffectsChain() .reverb( reverberance=random.randrange(50), room_scale=random.randrange(50), stereo_depth=random.randrange(50) ) .pitch(shift=random.randrange(-300, 300)) .overdrive(gain=random.randrange(2,", "FlipAudio(Augmentation): def __init__(self, p): self.p = p def __call__(self, dataset,", "hop_size, n_mel = args self.n_fft = int(n_fft) self.hop_size = int(hop_size)", "p=0.5): self.chunk_length = chunk_length self.p = p def __call__(self, dataset,", "audio transformed[\"sr\"] = sr return transformed class STFT: eps =", ".reverb( reverberance=random.randrange(50), room_scale=random.randrange(50), stereo_depth=random.randrange(50) ) .pitch(shift=random.randrange(-300, 300)) .overdrive(gain=random.randrange(2, 10)) .speed(random.uniform(0.9,", "window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps) transformed = dict(inputs) transformed[\"stft\"] = np.transpose(stft) return", "def __init__(self, area=0.25, p=0.5): self.area = area self.p = p", "inputs[\"sr\"] start = np.random.randint(0, inputs[\"audio\"].size - max_length) transformed[\"audio\"] = inputs[\"audio\"][start:start+max_length]", "> self.max_length: max_length = self.max_length * inputs[\"sr\"] start = np.random.randint(0,", "**inputs): transformed = dict() for name, input in inputs.items(): if", "class MapLabels: def __init__(self, class_map, drop_raw=True): self.class_map = class_map def", "np.random.uniform() < self.p: first_audio, first_labels = inputs[\"audio\"], inputs[\"labels\"] random_sample =", "= 1e-4 def __init__(self, descriptor, verbose=True): 
name, *args = descriptor.split(\"_\")", "n_fft, hop_size): self.n_fft = n_fft self.hop_size = hop_size def __call__(self,", "mapping def __call__(self, dataset, **inputs): transformed = dict(inputs) for old,", "inputs = t(dataset=dataset, **inputs) return inputs class Identity: def __call__(self,", "= int(n_fft) self.hop_size = int(hop_size) self.n_mel = int(n_mel) self.n_features =", "return transformed class MixUp(Augmentation): def __init__(self, p): self.p = p", "random import math from functools import partial import json import", "args self.n_fft = int(n_fft) self.hop_size = int(hop_size) self.n_mel = int(n_mel)", "params:\\n\", \"n_fft: {}, hop_size: {}, n_mel: {}\".format( n_fft, hop_size, n_mel", "transformed.pop(\"raw_labels\") return transformed class MixUp(Augmentation): def __init__(self, p): self.p =", "stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps, log=False ) transformed[\"signal\"]", "compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps, log=False ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"],", "new in self.mapping.items(): transformed[new] = transformed.pop(old) return transformed class Compose:", "raw waveform features.\" ) def __call__(self, dataset, **inputs): transformed =", "def __init__(self, descriptor, verbose=True): name, *args = descriptor.split(\"_\") self.feature_type =", "= int(n_mel) self.n_features = self.n_mel self.padding_value = 0.0 if verbose:", "CutOut(Augmentation): def __init__(self, area=0.25, p=0.5): self.area = area self.p =", "transformed = dict(inputs) transformed[\"labels\"] = labels transformed.pop(\"raw_labels\") return transformed class", "\"stft\": n_fft, hop_size = args self.n_fft = int(n_fft) self.hop_size =", "class AudioAugmentation(Augmentation): def __init__(self, p): self.p = p def __call__(self,", "< self.p: transformed[\"audio\"] = cutout( transformed[\"audio\"], self.area) return transformed 
class", "\"\\nUsing mel features with params:\\n\", \"n_fft: {}, hop_size: {}, n_mel:", "random_sample[\"audio\"], first_labels, random_sample[\"labels\"] ) transformed[\"audio\"] = new_audio transformed[\"labels\"] = new_labels", "name == \"stft\": n_fft, hop_size = args self.n_fft = int(n_fft)", "mel features with params:\\n\", \"n_fft: {}, hop_size: {}, n_mel: {}\".format(", "class OneOf: def __init__(self, transforms): self.transforms = transforms def __call__(self,", "import json import pysndfx import librosa import numpy as np", "def switch_off_augmentations(self): for t in self.transforms: if isinstance(t, Augmentation): t.p", "original_size = inputs[\"audio\"].size target_size = int(np.random.uniform(self.min, self.max) * original_size) start", "= class_map def __call__(self, dataset, **inputs): labels = np.zeros(len(self.class_map), dtype=np.float32)", "int(n_mel) self.n_features = self.n_mel self.padding_value = 0.0 if verbose: print(", "eps=self.eps) transformed = dict(inputs) transformed[\"stft\"] = np.transpose(stft) return transformed class", "AudioFeatures: eps = 1e-4 def __init__(self, descriptor, verbose=True): name, *args", "LoadAudio: def __init__(self): pass def __call__(self, dataset, **inputs): audio, sr", "compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout ) SAMPLE_RATE = 44100 class", "__init__(self, p): self.p = p def __call__(self, dataset, **inputs): transformed", "= dict(inputs) transformed[\"stft\"] = np.transpose(stft) return transformed class AudioFeatures: eps", "= labels transformed.pop(\"raw_labels\") return transformed class MixUp(Augmentation): def __init__(self, p):", "room_scale=random.randrange(50), stereo_depth=random.randrange(50) ) .pitch(shift=random.randrange(-300, 300)) .overdrive(gain=random.randrange(2, 10)) .speed(random.uniform(0.9, 1.1)) )", "chunk_length self.p = p def __call__(self, dataset, **inputs): transformed =", "p=1.0): self.min, self.max = ratio self.p = p def __call__(self,", "= 0.0 if 
verbose: print( \"\\nUsing mel features with params:\\n\",", "__call__(self, dataset, **inputs): transform = random.choice(self.transforms) return transform(**inputs) class DropFields:", "transformed = dict(inputs) transformed[\"stft\"] = np.transpose(stft) return transformed class AudioFeatures:", "area self.p = p def __call__(self, dataset, **inputs): transformed =", "window_size=self.n_fft, hop_size=self.hop_size, # eps=self.eps, log=True # ) transformed[\"signal\"] = np.expand_dims(inputs[\"audio\"],", "transforms def __call__(self, dataset, **inputs): transform = random.choice(self.transforms) return transform(**inputs)", "t.p = 0.0 def __call__(self, dataset=None, **inputs): for t in", "self.transforms = transforms def __call__(self, dataset, **inputs): transform = random.choice(self.transforms)", "return transformed class Compose: def __init__(self, transforms): self.transforms = transforms", "json import pysndfx import librosa import numpy as np import", "transformed[\"audio\"], self.chunk_length, sr=transformed[\"sr\"]) return transformed class CutOut(Augmentation): def __init__(self, area=0.25,", "np.expand_dims(inputs[\"audio\"], -1) return transformed class SampleSegment(Augmentation): def __init__(self, ratio=(0.3, 0.9),", "max_length): self.max_length = max_length def __call__(self, dataset, **inputs): transformed =", "sr = read_audio(inputs[\"filename\"]) transformed = dict(inputs) transformed[\"audio\"] = audio transformed[\"sr\"]", "return transformed class SampleSegment(Augmentation): def __init__(self, ratio=(0.3, 0.9), p=1.0): self.min,", "dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] = shuffle_audio( transformed[\"audio\"], self.chunk_length,", "class DropFields: def __init__(self, fields): self.to_drop = fields def __call__(self,", "self.hop_size = hop_size def __call__(self, dataset, **inputs): stft = compute_stft(", "verbose: print( \"\\nUsing STFT features with params:\\n\", \"n_fft: {}, hop_size:", "dataset, 
**inputs): transform = random.choice(self.transforms) return transform(**inputs) class DropFields: def", "= dict(inputs) if np.random.uniform() < self.p: effects_chain = ( pysndfx.AudioEffectsChain()", "0.0 if verbose: print( \"\\nUsing raw waveform features.\" ) def", "labels transformed.pop(\"raw_labels\") return transformed class MixUp(Augmentation): def __init__(self, p): self.p", "= fields def __call__(self, dataset, **inputs): transformed = dict() for", "self.n_mel = int(n_mel) self.n_features = self.n_mel self.padding_value = 0.0 if", "hop_size ) ) elif name == \"mel\": n_fft, hop_size, n_mel", "def __call__(self, dataset, **inputs): transformed = dict() for name, input", "= input return transformed class RenameFields: def __init__(self, mapping): self.mapping", "== \"mel\": stft = compute_stft( inputs[\"audio\"], window_size=self.n_fft, hop_size=self.hop_size, eps=self.eps, log=False", "__call__(self, dataset=None, **inputs): for t in self.transforms: inputs = t(dataset=dataset,", "= dict(inputs) transformed[\"audio\"] = audio transformed[\"sr\"] = sr return transformed", "= dict(inputs) if np.random.uniform() < self.p: transformed[\"audio\"] = shuffle_audio( transformed[\"audio\"],", "pass def __call__(self, dataset, **inputs): audio, sr = read_audio(inputs[\"filename\"]) transformed", "class SampleSegment(Augmentation): def __init__(self, ratio=(0.3, 0.9), p=1.0): self.min, self.max =", "= dict() for name, input in inputs.items(): if not name", "stereo_depth=random.randrange(50) ) .pitch(shift=random.randrange(-300, 300)) .overdrive(gain=random.randrange(2, 10)) .speed(random.uniform(0.9, 1.1)) ) transformed[\"audio\"]", "features.\" ) def __call__(self, dataset, **inputs): transformed = dict(inputs) if", "= 0.0 if verbose: print( \"\\nUsing raw waveform features.\" )", "DropFields: def __init__(self, fields): self.to_drop = fields def __call__(self, dataset," ]
[ "u = poisson(gamma=g, mesh=mesh) # Save solution fname = f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\"", "File, RectangleMesh, Point mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36) #", "outfile): g=sample[1] u = poisson(gamma=g, mesh=mesh) # Save solution fname", "poisson(gamma=g, mesh=mesh) # Save solution fname = f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\" File(fname, 'w')", "fname = f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\" File(fname, 'w') << u return {int(sample[0]): {'u':", "MPI # comm = MPI.COMM_WORLD # rank = comm.Get_rank() if", "parser.add_argument('-n', '--num', default = 10, type=int, help=\"Number of samples\") parser.add_argument('-o',", "args.dist outfile = args.outfile.replace('.pkl','') inputdim = args.input_dim if inputdim ==", "rank = comm.Get_rank() if __name__=='__main__': import argparse parser = argparse.ArgumentParser(description=\"Poisson", "inputdim = args.input_dim if inputdim == 1: # U[1,5] randsamples", "= np.random.randn(num_samples, inputdim) elif dist == 'u': randsamples = -4*np.random.rand(num_samples,", "36, 36) # comm = mesh.mpi_comm() set_log_level(40) # ERROR=40 #", "np from fenics import set_log_level, File, RectangleMesh, Point mesh =", "if __name__=='__main__': import argparse parser = argparse.ArgumentParser(description=\"Poisson Problem\") parser.add_argument('-n', '--num',", "parser.parse_args() num_samples = args.num dist = args.dist outfile = args.outfile.replace('.pkl','')", "= RectangleMesh(Point(0,0), Point(1,1), 36, 36) # comm = mesh.mpi_comm() set_log_level(40)", "'1' from newpoisson import poisson import numpy as np from", "= args.dist outfile = args.outfile.replace('.pkl','') inputdim = args.input_dim if inputdim", "= list(zip(range(num_samples), randsamples)) def wrapper(sample, outfile): g=sample[1] u = poisson(gamma=g,", "return {int(sample[0]): {'u': fname, 'gamma': sample[1]}} results = [] for", "'gamma': sample[1]}} results = [] for sample in sample_seed_list: r", "# comm = 
mesh.mpi_comm() set_log_level(40) # ERROR=40 # from mpi4py", "= 1 + 4*np.random.rand(num_samples) else: # N(0,1) if dist ==", "if dist == 'n': randsamples = np.random.randn(num_samples, inputdim) elif dist", "type=int, help=\"Number of samples\") parser.add_argument('-o', '--outfile', default='results', help=\"Output filename (no", "= parser.parse_args() num_samples = args.num dist = args.dist outfile =", "import MPI # comm = MPI.COMM_WORLD # rank = comm.Get_rank()", "poisson import numpy as np from fenics import set_log_level, File,", "'n': randsamples = np.random.randn(num_samples, inputdim) elif dist == 'u': randsamples", "'--input-dim', default=1, type=int) parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal), `u`", "randsamples = -4*np.random.rand(num_samples, inputdim) else: raise ValueError(\"Improper distribution choice, use", "= '1' from newpoisson import poisson import numpy as np", "inputdim == 1: # U[1,5] randsamples = 1 + 4*np.random.rand(num_samples)", "help=\"Output filename (no extension)\") parser.add_argument('-i', '--input-dim', default=1, type=int) parser.add_argument('-d', '--dist',", "sample_seed_list: r = wrapper(sample, outfile) results.append(r) # print(results) import pickle", "# comm = MPI.COMM_WORLD # rank = comm.Get_rank() if __name__=='__main__':", "Point(1,1), 36, 36) # comm = mesh.mpi_comm() set_log_level(40) # ERROR=40", "== 'u': randsamples = -4*np.random.rand(num_samples, inputdim) else: raise ValueError(\"Improper distribution", "= [] for sample in sample_seed_list: r = wrapper(sample, outfile)", "MPI.COMM_WORLD # rank = comm.Get_rank() if __name__=='__main__': import argparse parser", "= comm.Get_rank() if __name__=='__main__': import argparse parser = argparse.ArgumentParser(description=\"Poisson Problem\")", "# rank = comm.Get_rank() if __name__=='__main__': import argparse parser =", "from newpoisson import poisson import numpy as np from fenics", "__name__=='__main__': import argparse parser = 
argparse.ArgumentParser(description=\"Poisson Problem\") parser.add_argument('-n', '--num', default", "sample_seed_list = list(zip(range(num_samples), randsamples)) def wrapper(sample, outfile): g=sample[1] u =", "RectangleMesh, Point mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36) # comm", "# ERROR=40 # from mpi4py import MPI # comm =", "File(fname, 'w') << u return {int(sample[0]): {'u': fname, 'gamma': sample[1]}}", "36) # comm = mesh.mpi_comm() set_log_level(40) # ERROR=40 # from", "(no extension)\") parser.add_argument('-i', '--input-dim', default=1, type=int) parser.add_argument('-d', '--dist', default='u', help='Distribution.", "r = wrapper(sample, outfile) results.append(r) # print(results) import pickle pickle.dump(results,", "mesh.mpi_comm() set_log_level(40) # ERROR=40 # from mpi4py import MPI #", "mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36) # comm = mesh.mpi_comm()", "4*np.random.rand(num_samples) else: # N(0,1) if dist == 'n': randsamples =", "default='results', help=\"Output filename (no extension)\") parser.add_argument('-i', '--input-dim', default=1, type=int) parser.add_argument('-d',", "[] for sample in sample_seed_list: r = wrapper(sample, outfile) results.append(r)", "extension)\") parser.add_argument('-i', '--input-dim', default=1, type=int) parser.add_argument('-d', '--dist', default='u', help='Distribution. 
`n`", "# N(0,1) if dist == 'n': randsamples = np.random.randn(num_samples, inputdim)", "comm = mesh.mpi_comm() set_log_level(40) # ERROR=40 # from mpi4py import", "== 1: # U[1,5] randsamples = 1 + 4*np.random.rand(num_samples) else:", "parser = argparse.ArgumentParser(description=\"Poisson Problem\") parser.add_argument('-n', '--num', default = 10, type=int,", "args = parser.parse_args() num_samples = args.num dist = args.dist outfile", "args.outfile.replace('.pkl','') inputdim = args.input_dim if inputdim == 1: # U[1,5]", "`n` (normal), `u` (uniform, default)') args = parser.parse_args() num_samples =", "U[1,5] randsamples = 1 + 4*np.random.rand(num_samples) else: # N(0,1) if", "import numpy as np from fenics import set_log_level, File, RectangleMesh,", "#!/usr/env/bin python import os # os.environ['OMP_NUM_THREADS'] = '1' from newpoisson", "1 + 4*np.random.rand(num_samples) else: # N(0,1) if dist == 'n':", "1: # U[1,5] randsamples = 1 + 4*np.random.rand(num_samples) else: #", "randsamples = 1 + 4*np.random.rand(num_samples) else: # N(0,1) if dist", "newpoisson import poisson import numpy as np from fenics import", "= argparse.ArgumentParser(description=\"Poisson Problem\") parser.add_argument('-n', '--num', default = 10, type=int, help=\"Number", "os # os.environ['OMP_NUM_THREADS'] = '1' from newpoisson import poisson import", "help='Distribution. `n` (normal), `u` (uniform, default)') args = parser.parse_args() num_samples", "parser.add_argument('-d', '--dist', default='u', help='Distribution. 
`n` (normal), `u` (uniform, default)') args", "fname, 'gamma': sample[1]}} results = [] for sample in sample_seed_list:", "randsamples)) def wrapper(sample, outfile): g=sample[1] u = poisson(gamma=g, mesh=mesh) #", "as np from fenics import set_log_level, File, RectangleMesh, Point mesh", "`u` (uniform)\") sample_seed_list = list(zip(range(num_samples), randsamples)) def wrapper(sample, outfile): g=sample[1]", "Save solution fname = f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\" File(fname, 'w') << u return", "'u': randsamples = -4*np.random.rand(num_samples, inputdim) else: raise ValueError(\"Improper distribution choice,", "`n` (normal), `u` (uniform)\") sample_seed_list = list(zip(range(num_samples), randsamples)) def wrapper(sample,", "= MPI.COMM_WORLD # rank = comm.Get_rank() if __name__=='__main__': import argparse", "comm.Get_rank() if __name__=='__main__': import argparse parser = argparse.ArgumentParser(description=\"Poisson Problem\") parser.add_argument('-n',", "default = 10, type=int, help=\"Number of samples\") parser.add_argument('-o', '--outfile', default='results',", "else: raise ValueError(\"Improper distribution choice, use `n` (normal), `u` (uniform)\")", "dist = args.dist outfile = args.outfile.replace('.pkl','') inputdim = args.input_dim if", "parser.add_argument('-i', '--input-dim', default=1, type=int) parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal),", "N(0,1) if dist == 'n': randsamples = np.random.randn(num_samples, inputdim) elif", "choice, use `n` (normal), `u` (uniform)\") sample_seed_list = list(zip(range(num_samples), randsamples))", "default=1, type=int) parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal), `u` (uniform,", "'--dist', default='u', help='Distribution. 
`n` (normal), `u` (uniform, default)') args =", "dist == 'n': randsamples = np.random.randn(num_samples, inputdim) elif dist ==", "type=int) parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal), `u` (uniform, default)')", "'--num', default = 10, type=int, help=\"Number of samples\") parser.add_argument('-o', '--outfile',", "of samples\") parser.add_argument('-o', '--outfile', default='results', help=\"Output filename (no extension)\") parser.add_argument('-i',", "sample[1]}} results = [] for sample in sample_seed_list: r =", "Problem\") parser.add_argument('-n', '--num', default = 10, type=int, help=\"Number of samples\")", "RectangleMesh(Point(0,0), Point(1,1), 36, 36) # comm = mesh.mpi_comm() set_log_level(40) #", "set_log_level(40) # ERROR=40 # from mpi4py import MPI # comm", "help=\"Number of samples\") parser.add_argument('-o', '--outfile', default='results', help=\"Output filename (no extension)\")", "(uniform, default)') args = parser.parse_args() num_samples = args.num dist =", "# U[1,5] randsamples = 1 + 4*np.random.rand(num_samples) else: # N(0,1)", "wrapper(sample, outfile): g=sample[1] u = poisson(gamma=g, mesh=mesh) # Save solution", "= f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\" File(fname, 'w') << u return {int(sample[0]): {'u': fname,", "-4*np.random.rand(num_samples, inputdim) else: raise ValueError(\"Improper distribution choice, use `n` (normal),", "10, type=int, help=\"Number of samples\") parser.add_argument('-o', '--outfile', default='results', help=\"Output filename", "= args.input_dim if inputdim == 1: # U[1,5] randsamples =", "import set_log_level, File, RectangleMesh, Point mesh = RectangleMesh(Point(0,0), Point(1,1), 36,", "(normal), `u` (uniform)\") sample_seed_list = list(zip(range(num_samples), randsamples)) def wrapper(sample, outfile):", "import os # os.environ['OMP_NUM_THREADS'] = '1' from newpoisson import poisson", "outfile = args.outfile.replace('.pkl','') inputdim = args.input_dim if 
inputdim == 1:", "argparse.ArgumentParser(description=\"Poisson Problem\") parser.add_argument('-n', '--num', default = 10, type=int, help=\"Number of", "ERROR=40 # from mpi4py import MPI # comm = MPI.COMM_WORLD", "= poisson(gamma=g, mesh=mesh) # Save solution fname = f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\" File(fname,", "numpy as np from fenics import set_log_level, File, RectangleMesh, Point", "fenics import set_log_level, File, RectangleMesh, Point mesh = RectangleMesh(Point(0,0), Point(1,1),", "<< u return {int(sample[0]): {'u': fname, 'gamma': sample[1]}} results =", "sample in sample_seed_list: r = wrapper(sample, outfile) results.append(r) # print(results)", "num_samples = args.num dist = args.dist outfile = args.outfile.replace('.pkl','') inputdim", "inputdim) elif dist == 'u': randsamples = -4*np.random.rand(num_samples, inputdim) else:", "distribution choice, use `n` (normal), `u` (uniform)\") sample_seed_list = list(zip(range(num_samples),", "comm = MPI.COMM_WORLD # rank = comm.Get_rank() if __name__=='__main__': import", "{int(sample[0]): {'u': fname, 'gamma': sample[1]}} results = [] for sample", "'--outfile', default='results', help=\"Output filename (no extension)\") parser.add_argument('-i', '--input-dim', default=1, type=int)", "# from mpi4py import MPI # comm = MPI.COMM_WORLD #", "from mpi4py import MPI # comm = MPI.COMM_WORLD # rank", "filename (no extension)\") parser.add_argument('-i', '--input-dim', default=1, type=int) parser.add_argument('-d', '--dist', default='u',", "== 'n': randsamples = np.random.randn(num_samples, inputdim) elif dist == 'u':", "= 10, type=int, help=\"Number of samples\") parser.add_argument('-o', '--outfile', default='results', help=\"Output", "# Save solution fname = f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\" File(fname, 'w') << u", "solution fname = f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\" File(fname, 'w') << u return {int(sample[0]):", "dist == 'u': randsamples = 
-4*np.random.rand(num_samples, inputdim) else: raise ValueError(\"Improper", "elif dist == 'u': randsamples = -4*np.random.rand(num_samples, inputdim) else: raise", "args.num dist = args.dist outfile = args.outfile.replace('.pkl','') inputdim = args.input_dim", "g=sample[1] u = poisson(gamma=g, mesh=mesh) # Save solution fname =", "raise ValueError(\"Improper distribution choice, use `n` (normal), `u` (uniform)\") sample_seed_list", "import argparse parser = argparse.ArgumentParser(description=\"Poisson Problem\") parser.add_argument('-n', '--num', default =", "inputdim) else: raise ValueError(\"Improper distribution choice, use `n` (normal), `u`", "'w') << u return {int(sample[0]): {'u': fname, 'gamma': sample[1]}} results", "= wrapper(sample, outfile) results.append(r) # print(results) import pickle pickle.dump(results, open(f'{outfile}.pkl','wb'))", "def wrapper(sample, outfile): g=sample[1] u = poisson(gamma=g, mesh=mesh) # Save", "else: # N(0,1) if dist == 'n': randsamples = np.random.randn(num_samples,", "python import os # os.environ['OMP_NUM_THREADS'] = '1' from newpoisson import", "in sample_seed_list: r = wrapper(sample, outfile) results.append(r) # print(results) import", "{'u': fname, 'gamma': sample[1]}} results = [] for sample in", "f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\" File(fname, 'w') << u return {int(sample[0]): {'u': fname, 'gamma':", "for sample in sample_seed_list: r = wrapper(sample, outfile) results.append(r) #", "import poisson import numpy as np from fenics import set_log_level,", "= args.outfile.replace('.pkl','') inputdim = args.input_dim if inputdim == 1: #", "os.environ['OMP_NUM_THREADS'] = '1' from newpoisson import poisson import numpy as", "Point mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36) # comm =", "= mesh.mpi_comm() set_log_level(40) # ERROR=40 # from mpi4py import MPI", "parser.add_argument('-o', '--outfile', default='results', help=\"Output filename (no extension)\") parser.add_argument('-i', 
'--input-dim', default=1,", "list(zip(range(num_samples), randsamples)) def wrapper(sample, outfile): g=sample[1] u = poisson(gamma=g, mesh=mesh)", "if inputdim == 1: # U[1,5] randsamples = 1 +", "u return {int(sample[0]): {'u': fname, 'gamma': sample[1]}} results = []", "mesh=mesh) # Save solution fname = f\"{outfile}-data/poisson-{int(sample[0]):06d}.xml\" File(fname, 'w') <<", "(normal), `u` (uniform, default)') args = parser.parse_args() num_samples = args.num", "default)') args = parser.parse_args() num_samples = args.num dist = args.dist", "`u` (uniform, default)') args = parser.parse_args() num_samples = args.num dist", "mpi4py import MPI # comm = MPI.COMM_WORLD # rank =", "argparse parser = argparse.ArgumentParser(description=\"Poisson Problem\") parser.add_argument('-n', '--num', default = 10,", "= args.num dist = args.dist outfile = args.outfile.replace('.pkl','') inputdim =", "+ 4*np.random.rand(num_samples) else: # N(0,1) if dist == 'n': randsamples", "default='u', help='Distribution. 
`n` (normal), `u` (uniform, default)') args = parser.parse_args()", "randsamples = np.random.randn(num_samples, inputdim) elif dist == 'u': randsamples =", "use `n` (normal), `u` (uniform)\") sample_seed_list = list(zip(range(num_samples), randsamples)) def", "results = [] for sample in sample_seed_list: r = wrapper(sample,", "# os.environ['OMP_NUM_THREADS'] = '1' from newpoisson import poisson import numpy", "args.input_dim if inputdim == 1: # U[1,5] randsamples = 1", "ValueError(\"Improper distribution choice, use `n` (normal), `u` (uniform)\") sample_seed_list =", "from fenics import set_log_level, File, RectangleMesh, Point mesh = RectangleMesh(Point(0,0),", "= -4*np.random.rand(num_samples, inputdim) else: raise ValueError(\"Improper distribution choice, use `n`", "set_log_level, File, RectangleMesh, Point mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36)", "(uniform)\") sample_seed_list = list(zip(range(num_samples), randsamples)) def wrapper(sample, outfile): g=sample[1] u", "np.random.randn(num_samples, inputdim) elif dist == 'u': randsamples = -4*np.random.rand(num_samples, inputdim)", "samples\") parser.add_argument('-o', '--outfile', default='results', help=\"Output filename (no extension)\") parser.add_argument('-i', '--input-dim'," ]
[ "# Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial): \"\"\" Prüft ein Polynom auf", "\"\"\" if polynomial.degree() < 0: return logging.error(\"Polynom ungültig\") const_coefficient =", "\"\"\"Highest common factor\"\"\" if y == 0: return x else:", "ist als die absolute Summe der restlichen Koeffizienten \"\"\" if", "= polynomial.coefficients[0] if const_coefficient == 0: return 0 lead_coefficient =", "def is_irreducible_eisenstein(polynomial): \"\"\" Eine Implementierung des Eisensteinkriteriums. \"\"\" # Polynom", "polynomial.degree() == 0: return True for x, y in itertools.combinations(non_zero_polynomial,", "rekursive Implementierung von HCF def hcf(x, y): \"\"\"Highest common factor\"\"\"", "in range(1, n + 1): if n % i ==", "coeff % p != 0: return 2 # teilt die", "n > 1: factors.append(n) return factors # rekursive Implementierung von", "import helper import itertools def factor(n): # Faktorisierung einer Zahl", "<= n: if n % i: i += 1 else:", "1]: if coeff % p != 0: return 2 #", "n: if n % i: i += 1 else: n", "!= 0 ] # Nullen würden Ergebnis von HCF verfälschen", "(Perron). Führender Koeffizient != 1 funktioniert nicht. Keine Aussage möglich,", "vorletzer Koeffizient kleiner ist als die absolute Summe der restlichen", "] # Nullen würden Ergebnis von HCF verfälschen if polynomial.degree()", "!= 1: return False return True # Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def", "Primfaktorzerlegung einer Zahl n i = 2 factors = []", "teilt. 
p^2 darf a0 nicht teilen const_coeff = polynomial.coefficients[0] if", "teilerfremde Koeffizienten if helper.is_polynomial_coprime(polynomial is False): return 2 # Prüfe,", "0 ] # Nullen würden Ergebnis von HCF verfälschen if", "# Primfaktorzerlegung einer Zahl n i = 2 factors =", "n % i: i += 1 else: n //= i", "gibt, die alle Koeffizienten des Polynoms bis Grad m -", "prime_factor(n): # Primfaktorzerlegung einer Zahl n i = 2 factors", "für Eisenstein sind teilerfremde Koeffizienten if helper.is_polynomial_coprime(polynomial is False): return", "kann keine Aussage getroffen werden return 2 for coeff in", "1 return 2 # Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial):", "in itertools.combinations(non_zero_polynomial, 2): if hcf(x, y) != 1: return False", "der restlichen Koeffizienten \"\"\" if polynomial.degree() < 0: return logging.error(\"Polynom", "factors.append(n) return factors # rekursive Implementierung von HCF def hcf(x,", "return 2 # Voraussetzung für Eisenstein sind teilerfremde Koeffizienten if", "polynomial.degree() < 1: return 2 # Voraussetzung für Eisenstein sind", "2 factors = [] while i * i <= n:", "- 1]: if coeff % p != 0: return 2", "polynomial.coefficients: if i < polynomial.degree() - 1: total += abs(coeff)", "teilerfremd (coprime) ist\"\"\" non_zero_polynomial = [ i for i in", "is_polynomial_coprime(polynomial): \"\"\"Überprüft, ob ein Polynom teilerfremd (coprime) ist\"\"\" non_zero_polynomial =", "Polynome vom Typ Polynomial, keine direkten Listen von Koeffizienten \"\"\"", "return 2 for coeff in polynomial.coefficients[0 : polynomial.degree() - 1]:", "0: factors.append(i) return factors def prime_factor(n): # Primfaktorzerlegung einer Zahl", "Eisenstein sind teilerfremde Koeffizienten if helper.is_polynomial_coprime(polynomial is False): return 2", "if n % i == 0: factors.append(i) return factors def", 
"nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1]) total = 1 i =", "zu erhalten prime_factors = helper.prime_factor(const_coeff) for p in prime_factors: if", "funktioniert nicht. Keine Aussage möglich, wenn vorletzer Koeffizient kleiner ist", "polynomial.degree() - 1: total += abs(coeff) i = i +", "darf a0 nicht teilen const_coeff = polynomial.coefficients[0] if const_coeff ==", "verfälschen if polynomial.degree() == 0: return True for x, y", "0: return x else: return hcf(y, x % y) def", "lead_coefficient == 1 nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1]) total =", "is False): return 2 # Prüfe, ob es eine Primzahl", "eine Primzahl gibt, die alle Koeffizienten des Polynoms bis Grad", "Implementierung des Eisensteinkriteriums. \"\"\" # Polynom muss einen Grad m", "= [ i for i in polynomial.coefficients if i !=", "einen Grad m >= 1 haben if polynomial.degree() < 1:", "coeff in polynomial.coefficients: if i < polynomial.degree() - 1: total", "Primzahlen zu erhalten prime_factors = helper.prime_factor(const_coeff) for p in prime_factors:", "erhalten prime_factors = helper.prime_factor(const_coeff) for p in prime_factors: if (", "1 haben if polynomial.degree() < 1: return 2 # Voraussetzung", "\"\"\"Überprüft, ob ein Polynom teilerfremd (coprime) ist\"\"\" non_zero_polynomial = [", "return x else: return hcf(y, x % y) def is_polynomial_coprime(polynomial):", "factor(n): # Faktorisierung einer Zahl n i = 0 factors", "return factors # rekursive Implementierung von HCF def hcf(x, y):", "logging import helper import itertools def factor(n): # Faktorisierung einer", "Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial): \"\"\" Prüft ein Polynom auf Irreduzierbarkeit", "Führender Koeffizient != 1 funktioniert nicht. 
Keine Aussage möglich, wenn", "False): return 2 # Prüfe, ob es eine Primzahl gibt,", "i <= n: if n % i: i += 1", "in prime_factors: if ( const_coeff % pow(p, 2) != 0", "Implementierung von HCF def hcf(x, y): \"\"\"Highest common factor\"\"\" if", "return False return True # Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial): \"\"\"", "y in itertools.combinations(non_zero_polynomial, 2): if hcf(x, y) != 1: return", "sind teilerfremde Koeffizienten if helper.is_polynomial_coprime(polynomial is False): return 2 #", "+ 1): if n % i == 0: factors.append(i) return", "https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial): \"\"\" Eine Implementierung des Eisensteinkriteriums.", "if i < polynomial.degree() - 1: total += abs(coeff) i", "y == 0: return x else: return hcf(y, x %", "auf Irreduzierbarkeit (Perron). Führender Koeffizient != 1 funktioniert nicht. 
Keine", "getroffen werden return 2 for coeff in polynomial.coefficients[0 : polynomial.degree()", "helper import itertools def factor(n): # Faktorisierung einer Zahl n", "hcf(x, y) != 1: return False return True # Quelle:", "i * i <= n: if n % i: i", "factors.append(i) return factors def prime_factor(n): # Primfaktorzerlegung einer Zahl n", "//= i factors.append(i) if n > 1: factors.append(n) return factors", "return logging.error(\"Polynom ungültig\") const_coefficient = polynomial.coefficients[0] if const_coefficient == 0:", "Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben werden Polynome vom Typ Polynomial, keine", "p in prime_factors: if ( const_coeff % pow(p, 2) !=", "werden Polynome vom Typ Polynomial, keine direkten Listen von Koeffizienten", "const_coefficient = polynomial.coefficients[0] if const_coefficient == 0: return 0 lead_coefficient", "return True for x, y in itertools.combinations(non_zero_polynomial, 2): if hcf(x,", "if const_coeff == 0: return 0 # Erhalte Primfaktorzerlegung der", "Prüfe, ob es eine Primzahl gibt, die alle Koeffizienten des", "y) def is_polynomial_coprime(polynomial): \"\"\"Überprüft, ob ein Polynom teilerfremd (coprime) ist\"\"\"", "Listen von Koeffizienten \"\"\" import logging import helper import itertools", "Implementiert wurden das Eisenstein- und das Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf", "- 1: total += abs(coeff) i = i + 1", "# http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial): \"\"\" Eine Implementierung des Eisensteinkriteriums. 
\"\"\"", "n i = 0 factors = [] for i in", "von HCF def hcf(x, y): \"\"\"Highest common factor\"\"\" if y", "import itertools def factor(n): # Faktorisierung einer Zahl n i", "i = 0 for coeff in polynomial.coefficients: if i <", "m >= 1 haben if polynomial.degree() < 1: return 2", "const_coeff = polynomial.coefficients[0] if const_coeff == 0: return 0 #", "es eine Primzahl gibt, die alle Koeffizienten des Polynoms bis", "return 2 # Prüfe, ob es eine Primzahl gibt, die", "> 1: factors.append(n) return factors # rekursive Implementierung von HCF", "< 0: return logging.error(\"Polynom ungültig\") const_coefficient = polynomial.coefficients[0] if const_coefficient", "x, y in itertools.combinations(non_zero_polynomial, 2): if hcf(x, y) != 1:", "if hcf(x, y) != 1: return False return True #", "Faktorisierung einer Zahl n i = 0 factors = []", "Zahl n i = 0 factors = [] for i", "False return True # Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial): \"\"\" Prüft", "das Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben werden Polynome vom Typ", "- 1]) total = 1 i = 0 for coeff", "% i == 0: factors.append(i) return factors def prime_factor(n): #", "Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben werden Polynome vom Typ Polynomial,", "HCF def hcf(x, y): \"\"\"Highest common factor\"\"\" if y ==", "== 0: return 0 # Erhalte Primfaktorzerlegung der Konstante, um", "range(1, n + 1): if n % i == 0:", "den Koeffizienten nicht, kann keine Aussage getroffen werden return 1", "y) != 1: return False return True # Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf", "helper.is_polynomial_coprime(polynomial is False): return 2 # Prüfe, ob es eine", "= polynomial.coefficients[polynomial.degree()] assert lead_coefficient == 1 nm1_coefficient = 
abs(polynomial.coefficients[polynomial.degree() -", "> total: return 1 return 2 # Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf #", "return 0 lead_coefficient = polynomial.coefficients[polynomial.degree()] assert lead_coefficient == 1 nm1_coefficient", "p != 0: return 2 # teilt die Primzahl den", "x % y) def is_polynomial_coprime(polynomial): \"\"\"Überprüft, ob ein Polynom teilerfremd", "polynomial.degree() - 1]: if coeff % p != 0: return", "Koeffizient != 1 funktioniert nicht. Keine Aussage möglich, wenn vorletzer", "Primfaktorzerlegung der Konstante, um Grundlage von Primzahlen zu erhalten prime_factors", "common factor\"\"\" if y == 0: return x else: return", "i factors.append(i) if n > 1: factors.append(n) return factors #", "2 # Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial): \"\"\" Eine", "factors = [] for i in range(1, n + 1):", "[] while i * i <= n: if n %", "p^2 darf a0 nicht teilen const_coeff = polynomial.coefficients[0] if const_coeff", "= 0 factors = [] for i in range(1, n", "True # Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial): \"\"\" Prüft ein Polynom", "def is_polynomial_coprime(polynomial): \"\"\"Überprüft, ob ein Polynom teilerfremd (coprime) ist\"\"\" non_zero_polynomial", "der Konstante, um Grundlage von Primzahlen zu erhalten prime_factors =", "in polynomial.coefficients[0 : polynomial.degree() - 1]: if coeff % p", "# Faktorisierung einer Zahl n i = 0 factors =", "n //= i factors.append(i) if n > 1: factors.append(n) return", "teilen const_coeff = polynomial.coefficients[0] if const_coeff == 0: return 0", "von HCF verfälschen if polynomial.degree() == 0: return True for", "[] for i in range(1, n + 1): if n", "!= 0 ): # teilt p^2 den konstanten Koeffizienten, dann", "= polynomial.coefficients[0] if const_coeff == 0: return 0 # 
Erhalte", "alle Koeffizienten des Polynoms bis Grad m - 1 teilt.", "if coeff % p != 0: return 2 # teilt", "Irreduzibilitätskriterien Implementiert wurden das Eisenstein- und das Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf", "== 0: return x else: return hcf(y, x % y)", "wurden das Eisenstein- und das Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben", "return True # Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial): \"\"\" Prüft ein", "lead_coefficient = polynomial.coefficients[polynomial.degree()] assert lead_coefficient == 1 nm1_coefficient = abs(polynomial.coefficients[polynomial.degree()", "1 teilt. p^2 darf a0 nicht teilen const_coeff = polynomial.coefficients[0]", "Typ Polynomial, keine direkten Listen von Koeffizienten \"\"\" import logging", "Konstante, um Grundlage von Primzahlen zu erhalten prime_factors = helper.prime_factor(const_coeff)", "0: return logging.error(\"Polynom ungültig\") const_coefficient = polynomial.coefficients[0] if const_coefficient ==", "y): \"\"\"Highest common factor\"\"\" if y == 0: return x", "( const_coeff % pow(p, 2) != 0 ): # teilt", "absolute Summe der restlichen Koeffizienten \"\"\" if polynomial.degree() < 0:", "if ( const_coeff % pow(p, 2) != 0 ): #", "um Grundlage von Primzahlen zu erhalten prime_factors = helper.prime_factor(const_coeff) for", "hcf(y, x % y) def is_polynomial_coprime(polynomial): \"\"\"Überprüft, ob ein Polynom", "in polynomial.coefficients if i != 0 ] # Nullen würden", "1: return 2 # Voraussetzung für Eisenstein sind teilerfremde Koeffizienten", "Koeffizienten \"\"\" import logging import helper import itertools def factor(n):", "abs(coeff) i = i + 1 if nm1_coefficient > total:", "Polynomial, keine direkten Listen von Koeffizienten \"\"\" import logging import", "0: return 2 # teilt die Primzahl den Koeffizienten nicht,", "Aussage 
getroffen werden return 2 for coeff in polynomial.coefficients[0 :", "- 1 teilt. p^2 darf a0 nicht teilen const_coeff =", "if polynomial.degree() == 0: return True for x, y in", "# teilt p^2 den konstanten Koeffizienten, dann kann keine Aussage", "for coeff in polynomial.coefficients: if i < polynomial.degree() - 1:", "= 0 for coeff in polynomial.coefficients: if i < polynomial.degree()", "1): if n % i == 0: factors.append(i) return factors", "total += abs(coeff) i = i + 1 if nm1_coefficient", "+ 1 if nm1_coefficient > total: return 1 return 2", ": polynomial.degree() - 1]: if coeff % p != 0:", "haben if polynomial.degree() < 1: return 2 # Voraussetzung für", "const_coeff % pow(p, 2) != 0 ): # teilt p^2", "total = 1 i = 0 for coeff in polynomial.coefficients:", "== 0: factors.append(i) return factors def prime_factor(n): # Primfaktorzerlegung einer", "for i in polynomial.coefficients if i != 0 ] #", "i == 0: factors.append(i) return factors def prime_factor(n): # Primfaktorzerlegung", "Keine Aussage möglich, wenn vorletzer Koeffizient kleiner ist als die", "i in range(1, n + 1): if n % i", "% pow(p, 2) != 0 ): # teilt p^2 den", "def factor(n): # Faktorisierung einer Zahl n i = 0", "# Prüfe, ob es eine Primzahl gibt, die alle Koeffizienten", "HCF verfälschen if polynomial.degree() == 0: return True for x,", "den konstanten Koeffizienten, dann kann keine Aussage getroffen werden return", "restlichen Koeffizienten \"\"\" if polynomial.degree() < 0: return logging.error(\"Polynom ungültig\")", "!= 1 funktioniert nicht. 
Keine Aussage möglich, wenn vorletzer Koeffizient", "0 ): # teilt p^2 den konstanten Koeffizienten, dann kann", "nicht teilen const_coeff = polynomial.coefficients[0] if const_coeff == 0: return", "1: return False return True # Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial):", "wenn vorletzer Koeffizient kleiner ist als die absolute Summe der", "Summe der restlichen Koeffizienten \"\"\" if polynomial.degree() < 0: return", "ein Polynom auf Irreduzierbarkeit (Perron). Führender Koeffizient != 1 funktioniert", "Primzahl gibt, die alle Koeffizienten des Polynoms bis Grad m", "i: i += 1 else: n //= i factors.append(i) if", "1]) total = 1 i = 0 for coeff in", "Erhalte Primfaktorzerlegung der Konstante, um Grundlage von Primzahlen zu erhalten", "# Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial): \"\"\" Eine Implementierung", "return factors def prime_factor(n): # Primfaktorzerlegung einer Zahl n i", "const_coeff == 0: return 0 # Erhalte Primfaktorzerlegung der Konstante,", "for x, y in itertools.combinations(non_zero_polynomial, 2): if hcf(x, y) !=", "n % i == 0: factors.append(i) return factors def prime_factor(n):", "Grad m >= 1 haben if polynomial.degree() < 1: return", "!= 0: return 2 # teilt die Primzahl den Koeffizienten", "logging.error(\"Polynom ungültig\") const_coefficient = polynomial.coefficients[0] if const_coefficient == 0: return", "= 1 i = 0 for coeff in polynomial.coefficients: if", "Nullen würden Ergebnis von HCF verfälschen if polynomial.degree() == 0:", "for p in prime_factors: if ( const_coeff % pow(p, 2)", "die Primzahl den Koeffizienten nicht, kann keine Aussage getroffen werden", "assert lead_coefficient == 1 nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1]) total", "0 lead_coefficient = polynomial.coefficients[polynomial.degree()] assert 
lead_coefficient == 1 nm1_coefficient =", "if nm1_coefficient > total: return 1 return 2 # Quellen:", "ein Polynom teilerfremd (coprime) ist\"\"\" non_zero_polynomial = [ i for", "direkten Listen von Koeffizienten \"\"\" import logging import helper import", "factors def prime_factor(n): # Primfaktorzerlegung einer Zahl n i =", "Eisenstein- und das Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben werden Polynome", "m - 1 teilt. p^2 darf a0 nicht teilen const_coeff", "# Voraussetzung für Eisenstein sind teilerfremde Koeffizienten if helper.is_polynomial_coprime(polynomial is", "polynomial.coefficients[0 : polynomial.degree() - 1]: if coeff % p !=", "Polynom teilerfremd (coprime) ist\"\"\" non_zero_polynomial = [ i for i", "= [] for i in range(1, n + 1): if", "Voraussetzung für Eisenstein sind teilerfremde Koeffizienten if helper.is_polynomial_coprime(polynomial is False):", "while i * i <= n: if n % i:", "return 1 return 2 # Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def", "* i <= n: if n % i: i +=", "+= abs(coeff) i = i + 1 if nm1_coefficient >", ">= 1 haben if polynomial.degree() < 1: return 2 #", "if helper.is_polynomial_coprime(polynomial is False): return 2 # Prüfe, ob es", "# teilt die Primzahl den Koeffizienten nicht, kann keine Aussage", "i in polynomial.coefficients if i != 0 ] # Nullen", "Aussage möglich, wenn vorletzer Koeffizient kleiner ist als die absolute", "nm1_coefficient > total: return 1 return 2 # Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf", "1: factors.append(n) return factors # rekursive Implementierung von HCF def", "i for i in polynomial.coefficients if i != 0 ]", "prime_factors: if ( const_coeff % pow(p, 2) != 0 ):", "1 funktioniert nicht. 
Keine Aussage möglich, wenn vorletzer Koeffizient kleiner", "def prime_factor(n): # Primfaktorzerlegung einer Zahl n i = 2", "https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben werden Polynome vom Typ Polynomial, keine direkten", "< polynomial.degree() - 1: total += abs(coeff) i = i", "ob es eine Primzahl gibt, die alle Koeffizienten des Polynoms", "return 2 # Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial): \"\"\"", "const_coefficient == 0: return 0 lead_coefficient = polynomial.coefficients[polynomial.degree()] assert lead_coefficient", "i < polynomial.degree() - 1: total += abs(coeff) i =", "möglich, wenn vorletzer Koeffizient kleiner ist als die absolute Summe", "< 1: return 2 # Voraussetzung für Eisenstein sind teilerfremde", "0 factors = [] for i in range(1, n +", "polynomial.degree() < 0: return logging.error(\"Polynom ungültig\") const_coefficient = polynomial.coefficients[0] if", "in polynomial.coefficients: if i < polynomial.degree() - 1: total +=", "Übergeben werden Polynome vom Typ Polynomial, keine direkten Listen von", "einer Zahl n i = 0 factors = [] for", "factors.append(i) if n > 1: factors.append(n) return factors # rekursive", "0: return 0 lead_coefficient = polynomial.coefficients[polynomial.degree()] assert lead_coefficient == 1", "# rekursive Implementierung von HCF def hcf(x, y): \"\"\"Highest common", "Koeffizient kleiner ist als die absolute Summe der restlichen Koeffizienten", "2 # Prüfe, ob es eine Primzahl gibt, die alle", "for coeff in polynomial.coefficients[0 : polynomial.degree() - 1]: if coeff", "und das Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben werden Polynome vom", "Polynom auf Irreduzierbarkeit (Perron). 
Führender Koeffizient != 1 funktioniert nicht.", "http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben werden Polynome vom Typ Polynomial, keine direkten Listen", "i != 0 ] # Nullen würden Ergebnis von HCF", "True for x, y in itertools.combinations(non_zero_polynomial, 2): if hcf(x, y)", "\"\"\" import logging import helper import itertools def factor(n): #", "% y) def is_polynomial_coprime(polynomial): \"\"\"Überprüft, ob ein Polynom teilerfremd (coprime)", "if i != 0 ] # Nullen würden Ergebnis von", "Koeffizienten \"\"\" if polynomial.degree() < 0: return logging.error(\"Polynom ungültig\") const_coefficient", "http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial): \"\"\" Eine Implementierung des Eisensteinkriteriums. \"\"\" #", "prime_factors = helper.prime_factor(const_coeff) for p in prime_factors: if ( const_coeff", "is_irreducible_perron(polynomial): \"\"\" Prüft ein Polynom auf Irreduzierbarkeit (Perron). Führender Koeffizient", "keine Aussage getroffen werden return 2 for coeff in polynomial.coefficients[0", "): # teilt p^2 den konstanten Koeffizienten, dann kann keine", "factor\"\"\" if y == 0: return x else: return hcf(y,", "ob ein Polynom teilerfremd (coprime) ist\"\"\" non_zero_polynomial = [ i", "des Polynoms bis Grad m - 1 teilt. p^2 darf", "helper.prime_factor(const_coeff) for p in prime_factors: if ( const_coeff % pow(p,", "1 i = 0 for coeff in polynomial.coefficients: if i", "Polynoms bis Grad m - 1 teilt. 
p^2 darf a0", "i += 1 else: n //= i factors.append(i) if n", "Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial): \"\"\" Eine Implementierung des", "Grundlage von Primzahlen zu erhalten prime_factors = helper.prime_factor(const_coeff) for p", "Koeffizienten if helper.is_polynomial_coprime(polynomial is False): return 2 # Prüfe, ob", "das Eisenstein- und das Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf Übergeben werden", "polynomial.coefficients[0] if const_coeff == 0: return 0 # Erhalte Primfaktorzerlegung", "return 2 # teilt die Primzahl den Koeffizienten nicht, kann", "\"\"\" Irreduzibilitätskriterien Implementiert wurden das Eisenstein- und das Perronkriterium Quellen:", "dann kann keine Aussage getroffen werden return 2 for coeff", "die absolute Summe der restlichen Koeffizienten \"\"\" if polynomial.degree() <", "= abs(polynomial.coefficients[polynomial.degree() - 1]) total = 1 i = 0", "def is_irreducible_perron(polynomial): \"\"\" Prüft ein Polynom auf Irreduzierbarkeit (Perron). Führender", "n + 1): if n % i == 0: factors.append(i)", "hcf(x, y): \"\"\"Highest common factor\"\"\" if y == 0: return", "von Koeffizienten \"\"\" import logging import helper import itertools def", "p^2 den konstanten Koeffizienten, dann kann keine Aussage getroffen werden", "Prüft ein Polynom auf Irreduzierbarkeit (Perron). Führender Koeffizient != 1", "= i + 1 if nm1_coefficient > total: return 1", "+= 1 else: n //= i factors.append(i) if n >", "if y == 0: return x else: return hcf(y, x", "Irreduzierbarkeit (Perron). Führender Koeffizient != 1 funktioniert nicht. 
Keine Aussage", "1 else: n //= i factors.append(i) if n > 1:", "2) != 0 ): # teilt p^2 den konstanten Koeffizienten,", "total: return 1 return 2 # Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf", "is_irreducible_eisenstein(polynomial): \"\"\" Eine Implementierung des Eisensteinkriteriums. \"\"\" # Polynom muss", "# Erhalte Primfaktorzerlegung der Konstante, um Grundlage von Primzahlen zu", "0 # Erhalte Primfaktorzerlegung der Konstante, um Grundlage von Primzahlen", "# Nullen würden Ergebnis von HCF verfälschen if polynomial.degree() ==", "i = i + 1 if nm1_coefficient > total: return", "== 0: return True for x, y in itertools.combinations(non_zero_polynomial, 2):", "factors = [] while i * i <= n: if", "itertools def factor(n): # Faktorisierung einer Zahl n i =", "Grad m - 1 teilt. p^2 darf a0 nicht teilen", "die alle Koeffizienten des Polynoms bis Grad m - 1", "Primzahl den Koeffizienten nicht, kann keine Aussage getroffen werden return", "else: n //= i factors.append(i) if n > 1: factors.append(n)", "2 # teilt die Primzahl den Koeffizienten nicht, kann keine", "polynomial.coefficients if i != 0 ] # Nullen würden Ergebnis", "def hcf(x, y): \"\"\"Highest common factor\"\"\" if y == 0:", "import logging import helper import itertools def factor(n): # Faktorisierung", "if n % i: i += 1 else: n //=", "# Polynom muss einen Grad m >= 1 haben if", "ungültig\") const_coefficient = polynomial.coefficients[0] if const_coefficient == 0: return 0", "1: total += abs(coeff) i = i + 1 if", "Polynom muss einen Grad m >= 1 haben if polynomial.degree()", "\"\"\" Eine Implementierung des Eisensteinkriteriums. \"\"\" # Polynom muss einen", "itertools.combinations(non_zero_polynomial, 2): if hcf(x, y) != 1: return False return", "for i in range(1, n + 1): if n %", "Koeffizienten des Polynoms bis Grad m - 1 teilt. 
p^2", "i = 2 factors = [] while i * i", "i = 0 factors = [] for i in range(1,", "1 nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1]) total = 1 i", "n i = 2 factors = [] while i *", "muss einen Grad m >= 1 haben if polynomial.degree() <", "pow(p, 2) != 0 ): # teilt p^2 den konstanten", "Koeffizienten, dann kann keine Aussage getroffen werden return 2 for", "else: return hcf(y, x % y) def is_polynomial_coprime(polynomial): \"\"\"Überprüft, ob", "Eine Implementierung des Eisensteinkriteriums. \"\"\" # Polynom muss einen Grad", "würden Ergebnis von HCF verfälschen if polynomial.degree() == 0: return", "https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial): \"\"\" Prüft ein Polynom auf Irreduzierbarkeit (Perron).", "2 for coeff in polynomial.coefficients[0 : polynomial.degree() - 1]: if", "= [] while i * i <= n: if n", "teilt p^2 den konstanten Koeffizienten, dann kann keine Aussage getroffen", "von Primzahlen zu erhalten prime_factors = helper.prime_factor(const_coeff) for p in", "= 2 factors = [] while i * i <=", "2): if hcf(x, y) != 1: return False return True", "als die absolute Summe der restlichen Koeffizienten \"\"\" if polynomial.degree()", "= helper.prime_factor(const_coeff) for p in prime_factors: if ( const_coeff %", "x else: return hcf(y, x % y) def is_polynomial_coprime(polynomial): \"\"\"Überprüft,", "factors # rekursive Implementierung von HCF def hcf(x, y): \"\"\"Highest", "konstanten Koeffizienten, dann kann keine Aussage getroffen werden return 2", "keine direkten Listen von Koeffizienten \"\"\" import logging import helper", "if polynomial.degree() < 0: return logging.error(\"Polynom ungültig\") const_coefficient = polynomial.coefficients[0]", "== 1 nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1]) total = 1", "0 for coeff in polynomial.coefficients: if i < polynomial.degree() -", "einer Zahl n i = 2 factors = [] while", "1 if nm1_coefficient > total: return 1 return 2 #", 
"if const_coefficient == 0: return 0 lead_coefficient = polynomial.coefficients[polynomial.degree()] assert", "if polynomial.degree() < 1: return 2 # Voraussetzung für Eisenstein", "== 0: return 0 lead_coefficient = polynomial.coefficients[polynomial.degree()] assert lead_coefficient ==", "vom Typ Polynomial, keine direkten Listen von Koeffizienten \"\"\" import", "% i: i += 1 else: n //= i factors.append(i)", "\"\"\" Prüft ein Polynom auf Irreduzierbarkeit (Perron). Führender Koeffizient !=", "return 0 # Erhalte Primfaktorzerlegung der Konstante, um Grundlage von", "non_zero_polynomial = [ i for i in polynomial.coefficients if i", "\"\"\" # Polynom muss einen Grad m >= 1 haben", "i + 1 if nm1_coefficient > total: return 1 return", "kleiner ist als die absolute Summe der restlichen Koeffizienten \"\"\"", "2 # Voraussetzung für Eisenstein sind teilerfremde Koeffizienten if helper.is_polynomial_coprime(polynomial", "0: return True for x, y in itertools.combinations(non_zero_polynomial, 2): if", "[ i for i in polynomial.coefficients if i != 0", "nicht. Keine Aussage möglich, wenn vorletzer Koeffizient kleiner ist als", "Ergebnis von HCF verfälschen if polynomial.degree() == 0: return True", "if n > 1: factors.append(n) return factors # rekursive Implementierung", "return hcf(y, x % y) def is_polynomial_coprime(polynomial): \"\"\"Überprüft, ob ein", "polynomial.coefficients[polynomial.degree()] assert lead_coefficient == 1 nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1])", "a0 nicht teilen const_coeff = polynomial.coefficients[0] if const_coeff == 0:", "Zahl n i = 2 factors = [] while i", "ist\"\"\" non_zero_polynomial = [ i for i in polynomial.coefficients if", "polynomial.coefficients[0] if const_coefficient == 0: return 0 lead_coefficient = polynomial.coefficients[polynomial.degree()]", "(coprime) ist\"\"\" non_zero_polynomial = [ i for i in polynomial.coefficients", "bis Grad m - 1 teilt. 
p^2 darf a0 nicht", "coeff in polynomial.coefficients[0 : polynomial.degree() - 1]: if coeff %", "Eisensteinkriteriums. \"\"\" # Polynom muss einen Grad m >= 1", "des Eisensteinkriteriums. \"\"\" # Polynom muss einen Grad m >=", "werden return 2 for coeff in polynomial.coefficients[0 : polynomial.degree() -", "% p != 0: return 2 # teilt die Primzahl", "teilt die Primzahl den Koeffizienten nicht, kann keine Aussage getroffen", "abs(polynomial.coefficients[polynomial.degree() - 1]) total = 1 i = 0 for", "0: return 0 # Erhalte Primfaktorzerlegung der Konstante, um Grundlage" ]
[ "with the IR for the # stencil kernel body. func_text", "range(the_array.ndim): if isinstance(kernel_size[i][0], int): lo = kernel_size[i][0] hi = kernel_size[i][1]", "not match array dimensionality.\") return (neighborhood, relatively_indexed) def get_return_type(self, argtys):", "more than one relatively indexed arrays, add a call to", "tuple. if isinstance(stmt_index_var_typ, types.ConstSized): one_index_typ = stmt_index_var_typ[dim] else: one_index_typ =", "len(relatively_indexed) > 1: func_text += \" raise_if_incompatible_array_sizes(\" + first_arg for", "this function. stencil_func_name = \"__numba_stencil_%s_%s\" % ( hex(id(the_array)).replace(\"-\", \"_\"), self.id)", "2017 Intel Corporation # SPDX-License-Identifier: BSD-2-Clause # import copy import", "tuple construction. if len(index_vars) == 1: rvar = ir.Var(scope, out_name,", "Those loop nests use the # computed stencil kernel size", "len(index_vars) == 1: rvar = ir.Var(scope, out_name, loc) ivar =", "isinstance(stmt, ir.Return): ret_blocks.append(label) # If 1D array then avoid the", "ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op in ['setitem', 'static_setitem'] and", "the dimensions of the input array. Those loop nests use", "index variables into # ir.Var's. var_index_vars = [] for one_var", "new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else: acc_call = ir.Expr.binop(operator.add,", "kernel. 
func_text = \"def {}({}{}):\\n\".format(stencil_func_name, \",\".join(kernel_copy.arg_names), sig_extra) # Get loop", "stencil kernel body becomes the body of a loop, for", "typemap[stmt_index_var.name] # Same idea as above but you have to", "array\" % (len(neighborhood), ndim)) tuple_table = ir_utils.get_tuple_table(kernel.blocks) relatively_indexed = set()", "\", neighborhood=None\" # look in the type cache first if", "index used after looking up the variable in # the", "# So, take the minimum of 0 and the minimum", "# particular point in the iteration space. ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks,", "input array. shape_name = ir_utils.get_unused_var_name(\"full_shape\", name_var_table) func_text += \" {}", "default style func = func_or_mode else: mode = func_or_mode func", "But the current block gets a new label. body_first_label =", "argtys, None, {}) if isinstance(return_type, types.npytypes.Array): raise NumbaValueError( \"Stencil kernel", "argtys, kwtys, return_type, sigret): # look in the type cache", "from numba.core import types, typing, utils, ir, config, ir_utils, registry", "input.\") def slice_addition(the_slice, addend): \"\"\" Called by stencil in Python", "types that are not compatible # (e.g. values as float[:]", "array. Here we create the name for # the index", "to the gufunc # function's IR. for (l, b) in", "!= 'constant': raise ValueError(\"Unsupported mode style \" + mode) def", "[tmpvar] index_var = ir.Var(scope, index_names[dim], loc) index_vars += [index_var] tmpname", "arg.ndim: raise ValueError(\"Secondary stencil array does not have same number", "raise NumbaValueError( \"Stencil kernel must return a scalar and not", "# Add index variables to getitems in the IR to", "them with a SetItem call of the value \"returned\" by", "after label and variable renaming of the stencil kernel IR", "the input typing context. 
\"\"\" _ty_cls = type('StencilFuncTyping_' + str(self.id),", "a # unique stencil function name, the parameters to the", "+ stencil kernel IR into existence. # Copy the kernel", "'static_getitem'] and stmt.value.value.name in kernel.arg_names and stmt.value.value.name not in standard_indexed):", "= context.call_internal(builder, cres.fndesc, sig, args) context.add_linking_libs([cres.library]) return res @register_jitable def", "> argshape[i]: raise ValueError(\"Secondary stencil array has some dimension \"", "The stencil kernel body becomes the body of a loop,", "allocate the array. if result is None: return_type_name = numpy_support.as_dtype(", "block across __sentinel__ # A new block is allocated for", "the first input array. if len(relatively_indexed) > 1: func_text +=", "is not allowed.\") if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and", "given IR along with its calltype information. We need a", "might take # multiple input arrays with different types that", "calltypes: copy_calltypes[scopy] = calltypes[stmt] kernel_copy.blocks[block_label] = new_block return (kernel_copy, copy_calltypes)", "array (the out argument was not used) # then us", "tmpname = ir_utils.mk_unique_var(\"ind_stencil_index\") tmpvar = ir.Var(scope, tmpname, loc) ind_stencils +=", "self.options: cval = self.options[\"cval\"] cval_ty = typing.typeof.typeof(cval) if not self._typingctx.can_convert(cval_ty,", "more than three total arrays are given, the second and", "from numba.core.typing.templates import (CallableTemplate, signature, infer_global, AbstractTemplate) from numba.core.imputils import", "the tuple indexing # expression and add the corresponding index", "a scalar and not a numpy array.\") real_ret = types.npytypes.Array(return_type,", "or variable. 
In the latter case we'll use the extra", "relatively_indexed = set() for block in kernel.blocks.values(): scope = block.scope", "+= [tmpvar] index_var = ir.Var(scope, index_names[dim], loc) index_vars += [index_var]", "in # the const dictionary. if need_to_calc_kernel: assert hasattr(stmt_index_var, 'name')", "prevent # conflicts with the stencil function IR. # 5)", "block in stencil_ir.blocks.items(): for i, inst in enumerate(block.body): if (isinstance(", "else: return str(cval) # If we have to allocate the", "slice. \"\"\" return slice(the_slice.start + addend, the_slice.stop + addend) class", "compile_for_argtys(self, argtys, kwtys, return_type, sigret): # look in the type", "+= 1 for j in range(offset): func_text += \" \"", "loc)) # Get the type of this particular part of", "elements where # elements outside the bounds of the input", "None: need_to_calc_kernel = True else: need_to_calc_kernel = False if len(neighborhood)", "s_index_var = ir.Var(scope, s_index_name, loc) # Build a tuple from", "raise an error if any of the relatively indexed #", "class StencilFuncLowerer(object): '''Callable class responsible for lowering calls to a", "so that our changes for this callsite # won't effect", "create the unique name of this function. stencil_func_name = \"__numba_stencil_%s_%s\"", "new stencil function into existence. exec(func_text) in globals(), locals() stencil_func", "need literal_unroll here because the stencil might take # multiple", "# or np.zeros if they didn't to allocate the array.", "existence. 
exec(func_text) in globals(), locals() stencil_func = eval(stencil_func_name) if sigret", "= ir.Expr.getitem(stmt_index_var, const_index_vars[dim], loc) new_body.append(ir.Assign(getitemcall, getitemvar, loc)) # Get the", "stencil_ir\") ir_utils.dump_blocks(stencil_ir.blocks) print(\"before replace sentinel kernel_copy\") ir_utils.dump_blocks(kernel_copy.blocks) # Search all", "def raise_if_incompatible_array_sizes(a, *args): ashape = a.shape # We need literal_unroll", "the return statements original value into # the array using", "return in the stencil kernel to the block # containing", "rdtype = result.dtype rttype = numpy_support.from_dtype(rdtype) result_type = types.npytypes.Array(rttype, result.ndim,", "# Converts cval to a string constant def cval_as_str(cval): if", "{}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) else: acc_call = ir.Expr.binop(operator.add, getitemvar, index_vars[dim],", "cval_ty = typing.typeof.typeof(cval) if not self._typingctx.can_convert(cval_ty, return_type.dtype): msg = \"cval", "result = None if 'out' in kwtys: argtys_extra += (kwtys['out'],)", "\"'neighborhood' option required\") index_len = len(index) elif isinstance(index, int): neighborhood[0][0]", "index_vars, out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"After replace_return_with_setitem\", ret_blocks) ir_utils.dump_blocks(kernel_copy.blocks)", "create the name for # the index variable for each", "+= \" return {}\\n\".format(out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"new stencil", "ir.Expr.build_tuple(var_index_vars, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) rvar = ir.Var(scope, out_name, loc)", "return_type_name = numpy_support.as_dtype( return_type.dtype).type.__name__ if \"cval\" in self.options: cval =", "a copy of a given IR along with its calltype", "variables to getitems in the IR to transition the accesses", "getitem from the input array. if stmt.value.op == 'getitem': stmt_index_var", "the array. 
if result is None: return_type_name = numpy_support.as_dtype( return_type.dtype).type.__name__", "{}.shape\\n\".format(shape_name, first_arg) # Converts cval to a string constant def", "indexed arrays, add a call to # a function that", "out of the tuple indexing # expression and add the", "remember original kws arguments # stencils only supported for CPU", "= block.scope # split block across __sentinel__ # A new", "ir.Const)): if config.DEBUG_ARRAY_OPT >= 1: print(\"remembering in const_dict\", stmt.target.name, stmt.value.value)", "called on function without specifying mode style if not isinstance(func_or_mode,", "ndim: raise ValueError(\"%d dimensional neighborhood specified for %d \" \\", "kwtys: argtys_extra += (kwtys['out'],) sig_extra += \", out=None\" result =", "a given IR along with its calltype information. We need", "calltypes then add the type associated with this # statement", "used as stencil index.\") if index_len != ndim: raise NumbaValueError(", "= utils.pysignature(stencil_func) sigret.pysig = pysig # Get the IR for", "const dictionary. if need_to_calc_kernel: assert hasattr(stmt_index_var, 'name') if stmt_index_var.name in", "== 'getitem': stmt_index_var = stmt.value.index else: stmt_index_var = stmt.value.index_var #", "index used in the i'th dimension # but minimum's greater", "type to hold stencil information for the IR. 
\"\"\" id_counter", "= ir_utils.get_unused_var_name(\"out\", name_var_table) neighborhood_name = ir_utils.get_unused_var_name(\"neighborhood\", name_var_table) sig_extra = \"\"", "typing.typeof.typeof(cval): msg = \"cval type does not match stencil return", "if (isinstance( inst, ir.Assign) and inst.target.name == sentinel_name): # We", "tmpvar = ir.Var(scope, tmpname, loc) ind_stencils += [tmpvar] getitemname =", "_ty_cls) def compile_for_argtys(self, argtys, kwtys, return_type, sigret): # look in", "in this new function is a special sentinel # assignment.", "dimension in the first \" \"stencil input.\") def slice_addition(the_slice, addend):", "a typing class for a StencilFunc object in the input", "nest in the generated function for each # dimension in", "+= (kwtys['neighborhood'],) sig_extra += \", neighborhood=None\" # look in the", "# tmpvar will hold the real index and is computed", "IR. We will # remove this sentinel assignment and replace", "= eval(stencil_func_name) if sigret is not None: pysig = utils.pysignature(stencil_func)", "def _type_me(self, argtys, kwtys): \"\"\" Implement AbstractTemplate.generic() for the typing", "else: one_index_typ = stmt_index_var_typ[:] # If the array is indexed", "constant,\" \"'neighborhood' option required\") index_len = len(index) elif isinstance(index, int):", "def __call__(self, *args, **kwargs): if (self.neighborhood is not None and", "not standard indexing.\") if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0: raise", "shape_name, cval_as_str(cval), return_type_name) else: out_init =\"{} = np.zeros({}, dtype=np.{})\\n\".format( out_name,", "return slice(the_slice.start + addend, the_slice.stop + addend) class StencilFunc(object): \"\"\"", "__call__(self, context, builder, sig, args): cres = self.stencilFunc.compile_for_argtys(sig.args, {}, sig.return_type,", "return new_func.entry_point(*args) else: return new_func.entry_point(*(args+(result,))) def stencil(func_or_mode='constant', **options): # 
called", "new_body.append(ir.Assign(tuple_call, s_index_var, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,s_index_var,loc), stmt.target,loc)) else: new_body.append(stmt) block.body =", "(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes( self.kernel_ir, calltypes) # The stencil kernel", "sig = signature(real_ret, *argtys_extra) dummy_text = (\"def __numba_dummy_stencil({}{}):\\n pass\\n\".format( \",\".join(self.kernel_ir.arg_names),", "put a loop nest in the generated function for each", "1: # Single dimension always has index variable 'index0'. #", "index_var_name = ir_utils.get_unused_var_name(\"index\" + str(i), name_var_table) index_vars += [index_var_name] #", "block gets a new label. body_first_label = min(kernel_copy.blocks.keys()) # The", "in block.body: if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Const)): if config.DEBUG_ARRAY_OPT", "np.isnan(cval): return \"np.nan\" elif np.isinf(cval): if cval < 0: return", "= min(neighborhood[0][0], index) neighborhood[0][1] = max(neighborhood[0][1], index) index_len = 1", "sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc) calltypes[slice_addition_call]", "ir_utils.dump_blocks(kernel_copy.blocks) # The return in the stencil kernel becomes a", "= \"cval type does not match stencil return type.\" raise", "!= 0: raise NumbaValueError(\"Standard indexing requested for an array name", "= registry.cpu_target.typing_context self._targetctx = registry.cpu_target.target_context self._typingctx.refresh() self._targetctx.refresh() self._install_type(self._typingctx) self.neighborhood =", "# conflicts with the stencil function IR. 
# 5) Compile", "import types, typing, utils, ir, config, ir_utils, registry from numba.core.typing.templates", "the new copy of the kernel # and if the", "result.ndim, numpy_support.map_layout(result)) array_types = tuple([typing.typeof.typeof(x) for x in args]) array_types_full", "hi = kernel_size[i][1] else: lo = \"{}[{}][0]\".format(neighborhood_name, i) hi =", "Find return statements in the IR and replace them with", "the type cache first if argtys_extra in self._type_cache: (_sig, _,", "for the stencil function # that will execute the stencil", "label and variable renaming of the stencil kernel IR to", "applied to the copied IR will change the calltypes and", "one_index_typ = stmt_index_var_typ[dim] else: one_index_typ = stmt_index_var_typ[:] # If the", "sum_results = [] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name,", "new_body.append(ir.Assign(getitemcall, getitemvar, loc)) # Get the type of this particular", "tuple([typing.typeof.typeof(x) for x in args]) array_types_full = tuple([typing.typeof.typeof(x) for x", "getitemname = ir_utils.mk_unique_var(\"getitem\") getitemvar = ir.Var(scope, getitemname, loc) getitemcall =", "we # have to add the index value with a", "offset = 1 # Add the loop nests to the", "the user specified a cval stencil decorator option # or", "user into one that includes each dimension's index variable as", "stmt.target,loc)) else: new_body.append(stmt) block.body = new_body if need_to_calc_kernel: # Find", "tmpvar, loc)) const_index_vars += [tmpvar] index_var = ir.Var(scope, index_names[dim], loc)", "names of the index variables into # ir.Var's. var_index_vars =", "loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else: acc_call =", "and stmt.value.op in ['setitem', 'static_setitem'] and stmt.value.value.name in kernel.arg_names) or", "stencil outline for the sentinel. 
for label, block in stencil_ir.blocks.items():", "self.replace_return_with_setitem(kernel_copy.blocks, index_vars, out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"After replace_return_with_setitem\", ret_blocks)", "and isinstance(stmt.value, ir.Const)): if config.DEBUG_ARRAY_OPT >= 1: print(\"remembering in const_dict\",", "in effect array[-1] becomes array[index0-1]. \"\"\" const_dict = {} kernel_consts", "index_len != ndim: raise NumbaValueError( \"Stencil index does not match", "args aren't needed. ir_utils.remove_args(kernel_copy.blocks) first_arg = kernel_copy.arg_names[0] in_cps, out_cps =", "body. func_text += \"{} = 0\\n\".format(sentinel_name) func_text += \" return", "found the sentinel assignment. loc = inst.loc scope = block.scope", "stmt.value.index to # the current absolute location in index0. index_var", "unique stencil function name, the parameters to the stencil kernel,", "args): cres = self.stencilFunc.compile_for_argtys(sig.args, {}, sig.return_type, None) res = context.call_internal(builder,", "kernel index is not \" \"constant, 'neighborhood' option required\") if", "if stmt.value.op == 'getitem': stmt_index_var = stmt.value.index else: stmt_index_var =", "in kwtys: argtys_extra += (kwtys['out'],) sig_extra += \", out=None\" result", "# remove this sentinel assignment and replace it with the", "# For each block... for (block_label, block) in ir.blocks.items(): new_block", "def __init__(self, sf): self.stencilFunc = sf def __call__(self, context, builder,", "\"\"\"Constructs and installs a typing class for a StencilFunc object", "for out and neighborhood. 
out_name = ir_utils.get_unused_var_name(\"out\", name_var_table) neighborhood_name =", "this dimension because negative maximums would not cause us to", "return new_func def _type_me(self, argtys, kwtys): \"\"\" Implement AbstractTemplate.generic() for", "# The previous block jumps to the minimum labelled block", "for i in range(len(ashape)): if ashape[i] > argshape[i]: raise ValueError(\"Secondary", "index1, ... index_vars = [] for i in range(the_array.ndim): index_var_name", "\"smaller the same dimension in the first \" \"stencil input.\")", "# Get the shape of the first input array. shape_name", "self._stencil_wrapper(result, None, real_ret, typemap, calltypes, *array_types_full) if result is None:", "block.body: if isinstance(stmt, ir.Return): ret_blocks.append(label) # If 1D array then", "decorated @lower_builtin(stencil) def stencil_dummy_lower(context, builder, sig, args): \"lowering for dummy", "this stencil and here # create the unique name of", "+= (\"for {} in range(-min(0,{}),\" \"{}[{}]-max(0,{})):\\n\").format( index_vars[i], ranges[i][0], shape_name, i,", "# to them and then reconstitute as a tuple that", "!= ndim: raise NumbaValueError( \"Stencil index does not match array", "stmt_index_var = stmt.value.index_var # allow static_getitem since rewrite passes are", "type associated with this # statement to the calltypes copy.", "the block # containing statements after the sentinel. for ret_block", "being used. 
func_text += (\"for {} in range(-min(0,{}),\" \"{}[{}]-max(0,{})):\\n\").format( index_vars[i],", "NumbaValueError(\"stencil kernel index is not \" \"constant, 'neighborhood' option required\")", "it if \"cval\" in self.options: cval = self.options[\"cval\"] cval_ty =", "if sigret is not None: pysig = utils.pysignature(stencil_func) sigret.pysig =", "# Copy the kernel so that our changes for this", "\"\"\" Called by stencil in Python mode to add the", "# SPDX-License-Identifier: BSD-2-Clause # import copy import numpy as np", "We need literal_unroll here because the stencil might take #", "specified a cval stencil decorator option # or np.zeros if", "in standard_indexed: raise NumbaValueError(\"The first argument to a stencil kernel", "not constant,\" \"'neighborhood' option required\") index_len = len(index) elif isinstance(index,", "= 1 else: raise NumbaValueError( \"Non-tuple or non-integer used as", "stencil kernel. This function definition includes a # unique stencil", "return_type.dtype): msg = \"cval type does not match stencil return", "a call to # a function that will raise an", "_, _) = self._type_cache[argtys_extra] return _sig (real_ret, typemap, calltypes) =", "shape_name, return_type_name) func_text += \" \" + out_init else: #", "= None array_types = tuple([typing.typeof.typeof(x) for x in args]) array_types_full", "each block... 
for (block_label, block) in ir.blocks.items(): new_block = copy.deepcopy(ir.blocks[block_label])", "typemap, copy_calltypes) if self.neighborhood is None: self.neighborhood = kernel_size if", "kernel_size, relatively_indexed = self.add_indices_to_kernel( kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed, typemap,", "if len(index_vars) == 1: rvar = ir.Var(scope, out_name, loc) ivar", "with no accesses to \" \"relatively indexed arrays.\") for index", "types.Type)) array_types = args new_stencil_param_types = list(array_types) if config.DEBUG_ARRAY_OPT >=", "if (self.neighborhood is not None and len(self.neighborhood) != args[0].ndim): raise", "stencil kernel IR to prevent # conflicts with the stencil", "replaced loop # body in it. ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) new_func = compiler.compile_ir(", "neighborhood[0][0] = min(neighborhood[0][0], index) neighborhood[0][1] = max(neighborhood[0][1], index) index_len =", "and a list of the relatively indexed # arrays. kernel_size,", "if config.DEBUG_ARRAY_OPT >= 1: print(\"After add_indices_to_kernel\") ir_utils.dump_blocks(kernel_copy.blocks) # The return", "we can locate it in the IR. We will #", "new_func = self._stencil_wrapper(result, sigret, return_type, typemap, calltypes, *argtys) return new_func", "ndim == 1: # Single dimension always has index variable", "ranges[i][0], shape_name, i, ranges[i][1]) offset += 1 for j in", "tmpname = ir_utils.mk_unique_var(\"const_index\") tmpvar = ir.Var(scope, tmpname, loc) new_body.append(ir.Assign(ir.Const(dim, loc),", "to prevent # conflicts with the stencil function IR. 
#", "dimensionality.\") return (neighborhood, relatively_indexed) def get_return_type(self, argtys): if config.DEBUG_ARRAY_OPT >=", "_sig (real_ret, typemap, calltypes) = self.get_return_type(argtys) sig = signature(real_ret, *argtys_extra)", "Numba function to execute this stencil and here # create", "than three total arrays are given, the second and third", "\"\"\" Find return statements in the IR and replace them", "\"def {}({}{}):\\n\".format(stencil_func_name, \",\".join(kernel_copy.arg_names), sig_extra) # Get loop ranges for each", "function is a special sentinel # assignment. # 3) Get", "the array. # So, take the minimum of 0 and", "sigret is not None: pysig = utils.pysignature(stencil_func) sigret.pysig = pysig", "(len(self.neighborhood), argtys[0].ndim)) argtys_extra = argtys sig_extra = \"\" result =", "kwtys): \"\"\" Implement AbstractTemplate.generic() for the typing class built by", "= [] # For each statement in each block... for", "loop nest in the generated function for each # dimension", "arrays, add a call to # a function that will", "kernel to the block # containing statements after the sentinel.", "for option in options: if option not in [\"cval\", \"standard_indexing\",", "literal_unroll(args): if a.ndim != arg.ndim: raise ValueError(\"Secondary stencil array does", "from relative to regular Python indexing. Returns the # computed", "if option not in [\"cval\", \"standard_indexing\", \"neighborhood\"]: raise ValueError(\"Unknown stencil", "into # the array using the tuple index. si =", "the second and third # are iterated over in the", "return a scalar and not a numpy array.\") real_ret =", "ir.SetItem(rvar, s_index_var, stmt.value, loc) new_body.append(si) else: new_body.append(stmt) block.body = new_body", "= argtys sig_extra = \"\" result = None if 'out'", "contained return statements. 
\"\"\" ret_blocks = [] for label, block", "the first \" \"stencil input.\") def slice_addition(the_slice, addend): \"\"\" Called", "index_vars = [] for i in range(the_array.ndim): index_var_name = ir_utils.get_unused_var_name(\"index\"", "stmt_index_var_typ = typemap[stmt_index_var.name] # If the array is indexed with", "x in ret_blocks] if config.DEBUG_ARRAY_OPT >= 1: print(\"ret_blocks w/ offsets\",", "style if not isinstance(func_or_mode, str): mode = 'constant' # default", "{}, sig.return_type, None) res = context.call_internal(builder, cres.fndesc, sig, args) context.add_linking_libs([cres.library])", "self.get_return_type(argtys) sig = signature(real_ret, *argtys_extra) dummy_text = (\"def __numba_dummy_stencil({}{}):\\n pass\\n\".format(", "infer_global, AbstractTemplate) from numba.core.imputils import lower_builtin from numba.core.extending import register_jitable", "stencil function name, the parameters to the stencil kernel, loop", "# 5) Compile the combined stencil function IR + stencil", "(_, result, typemap, calltypes) = self._type_cache[argtys] new_func = self._stencil_wrapper(result, sigret,", "\"dimensional input array\".format( len(self.neighborhood), args[0].ndim)) if 'out' in kwargs: result", "stencil function IR # after label and variable renaming of", "raise NumbaValueError( \"Non-tuple or non-integer used as stencil index.\") if", ">= 1: print(\"__call__\", array_types, args, kwargs) (real_ret, typemap, calltypes) =", "tuple_call = ir.Expr.build_tuple(ind_stencils, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,s_index_var,loc), stmt.target,loc))", "block stencil_ir.blocks[label] = prev_block # Add a jump from all", "== 1: rvar = ir.Var(scope, out_name, loc) ivar = ir.Var(scope,", "print(\"get_return_type\", argtys) ir_utils.dump_blocks(self.kernel_ir.blocks) if not isinstance(argtys[0], types.npytypes.Array): raise NumbaValueError(\"The first", "Copy the statement to 
the new copy of the kernel", "and len(self.neighborhood) != args[0].ndim): raise ValueError(\"{} dimensional neighborhood specified for", "in calltypes: copy_calltypes[scopy] = calltypes[stmt] kernel_copy.blocks[block_label] = new_block return (kernel_copy,", "tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else: index_vars", "index value with a call to # slice_addition. if isinstance(one_index_typ,", "calltypes) return sig def copy_ir_with_calltypes(self, ir, calltypes): \"\"\" Create a", "for one_var in index_vars: index_var = ir.Var(scope, one_var, loc) var_index_vars", "isinstance(stmt.value, ir.Expr) and stmt.value.op in ['setitem', 'static_setitem'] and stmt.value.value.name in", "option in options: if option not in [\"cval\", \"standard_indexing\", \"neighborhood\"]:", "= tuple([typing.typeof.typeof(x) for x in args]) array_types_full = tuple([typing.typeof.typeof(x) for", "# The return in the stencil kernel becomes a setitem", "else: need_to_calc_kernel = False if len(neighborhood) != ndim: raise ValueError(\"%d", "input array\" % (len(neighborhood), ndim)) tuple_table = ir_utils.get_tuple_table(kernel.blocks) relatively_indexed =", "in the generated function for each # dimension in the", "as np from llvmlite import ir as lir from numba.core", "extract # individual elements out of the tuple indexing #", "array using the tuple index. si = ir.SetItem(rvar, s_index_var, stmt.value,", "of the stencil kernel and a list of the relatively", "parfor loop body blocks to the gufunc # function's IR.", "are given, the second and third # are iterated over", "assignment. Insert the stencil kernel IR into the stencil function", "= \"{}[:] = {}\\n\".format(out_name, cval_as_str(cval)) func_text += \" \" +", "- on that to get the positive offset in this", "array from being used. func_text += (\"for {} in range(-min(0,{}),\"", "from the index ir.Var's. 
tuple_call = ir.Expr.build_tuple(var_index_vars, loc) new_body.append(ir.Assign(tuple_call, s_index_var,", "IR into the stencil function IR # after label and", "is None: return_type_name = numpy_support.as_dtype( return_type.dtype).type.__name__ if \"cval\" in self.options:", "array is passed (_, result, typemap, calltypes) = self._type_cache[argtys] new_func", "= ir_utils.mk_unique_var(name) ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict) stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1 #", "\"\"\" return slice(the_slice.start + addend, the_slice.stop + addend) class StencilFunc(object):", "the statement to the new copy of the kernel #", "copy_calltypes[scopy] = calltypes[stmt] kernel_copy.blocks[block_label] = new_block return (kernel_copy, copy_calltypes) def", "in enumerate(block.body): if (isinstance( inst, ir.Assign) and inst.target.name == sentinel_name):", "previous block jumps to the minimum labelled block of #", "eval(\"__numba_dummy_stencil\") sig = sig.replace(pysig=utils.pysignature(dummy_func)) self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)]) self._type_cache[argtys_extra] = (sig,", "kernel must \" \"use relative indexing, not standard indexing.\") if", "stencil(func_or_mode='constant', **options): # called on function without specifying mode style", "return (kernel_copy, copy_calltypes) def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes,", "remove the sentinel # assignment. Insert the stencil kernel IR", "and variable renaming of the stencil kernel IR to prevent", "static_getitem since rewrite passes are applied #raise ValueError(\"Unexpected static_getitem in", "def _stencil(mode, options): if mode != 'constant': raise ValueError(\"Unsupported mode", "# stencils only supported for CPU context currently self._typingctx =", "array. Returns the block labels that contained return statements. 
\"\"\"", "the observed maximum index # in this dimension because negative", "the block containing the sentinel assignment and remove the sentinel", "type(self).id_counter type(self).id_counter += 1 self.kernel_ir = kernel_ir self.mode = mode", "with a slice then we # have to add the", "looking up the variable in # the const dictionary. if", "getitemname, loc) getitemcall = ir.Expr.getitem(stmt_index_var, const_index_vars[dim], loc) new_body.append(ir.Assign(getitemcall, getitemvar, loc))", "if isinstance(one_index_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func =", "array. Those loop nests use the # computed stencil kernel", "three total arrays are given, the second and third #", "function that will raise an error if any of the", "s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc) # Build", "func = func_or_mode else: mode = func_or_mode func = None", "argtys sig_extra = \"\" result = None if 'out' in", "block is allocated for the statements prior to the #", "result, sigret, return_type, typemap, calltypes, *args): # Overall approach: #", "self._targetctx, stencil_ir, new_stencil_param_types, None, compiler.DEFAULT_FLAGS, {}) return new_func def __call__(self,", "generate a Numba function to execute this stencil and here", "new_body.append(ir.Assign(tuple_call, s_index_var, loc)) rvar = ir.Var(scope, out_name, loc) # Write", "raise NumbaValueError(msg) out_init =\"{} = np.full({}, {}, dtype=np.{})\\n\".format( out_name, shape_name,", "is a string-repr numerical const, issue #7286 if np.isnan(cval): return", "variable # to them and then reconstitute as a tuple", "are applied #raise ValueError(\"Unexpected static_getitem in add_indices_to_kernel.\") relatively_indexed.add(stmt.value.value.name) # Store", "function. 
stencil_func_name = \"__numba_stencil_%s_%s\" % ( hex(id(the_array)).replace(\"-\", \"_\"), self.id) #", "= ir_utils.get_tuple_table(kernel.blocks) relatively_indexed = set() for block in kernel.blocks.values(): scope", "ir.Var(scope, getitemname, loc) getitemcall = ir.Expr.getitem(stmt_index_var, const_index_vars[dim], loc) new_body.append(ir.Assign(getitemcall, getitemvar,", "str(i), name_var_table) index_vars += [index_var_name] # Create extra signature for", "If we have to allocate the output array (the out", "= block stencil_ir.blocks[label] = prev_block # Add a jump from", "as part of the getitem calls. So, in effect array[-1]", "in standard_indexed): # We found a getitem from the input", "for i in range(the_array.ndim): for j in range(offset): func_text +=", "new_stencil_param_types = list(array_types) if config.DEBUG_ARRAY_OPT >= 1: print(\"new_stencil_param_types\", new_stencil_param_types) ir_utils.dump_blocks(stencil_ir.blocks)", "self._targetctx = registry.cpu_target.target_context self._typingctx.refresh() self._targetctx.refresh() self._install_type(self._typingctx) self.neighborhood = self.options.get(\"neighborhood\") self._type_cache", "Shift labels in the kernel copy so they are guaranteed", "\"out\" in name_var_table: raise NumbaValueError(\"Cannot use the reserved word 'out'", "cres = self.stencilFunc.compile_for_argtys(sig.args, {}, sig.return_type, None) res = context.call_internal(builder, cres.fndesc,", "result is not None: sig_extra += \", {}=None\".format(out_name) if \"neighborhood\"", "loc) new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (),", "add the type associated with this # statement to the", "{} kernel_consts = [] if config.DEBUG_ARRAY_OPT >= 1: print(\"add_indices_to_kernel\", ndim,", "the IR and replace them with a SetItem call of", "function to execute the stencil kernel. 
func_text = \"def {}({}{}):\\n\".format(stencil_func_name,", "kernel, loop # nests across the dimensions of the input", ">= 1: print(\"get_return_type\", argtys) ir_utils.dump_blocks(self.kernel_ir.blocks) if not isinstance(argtys[0], types.npytypes.Array): raise", "minimum index found in the kernel # and this will", "else: lo = \"{}[{}][0]\".format(neighborhood_name, i) hi = \"{}[{}][1]\".format(neighborhood_name, i) ranges.append((lo,", "i, ranges[i][1]) offset += 1 for j in range(offset): func_text", "for x in args]) array_types_full = tuple([typing.typeof.typeof(x) for x in", "rewrite passes are applied #raise ValueError(\"Unexpected static_getitem in add_indices_to_kernel.\") relatively_indexed.add(stmt.value.value.name)", "self._type_cache[argtys_extra] = (sig, result, typemap, calltypes) return sig def copy_ir_with_calltypes(self,", "the gufunc # function's IR. for (l, b) in kernel_copy.blocks.items():", "print(\"add_indices_to_kernel\", ndim, neighborhood) ir_utils.dump_blocks(kernel.blocks) if neighborhood is None: need_to_calc_kernel =", "result = kwtys['out'] if 'neighborhood' in kwtys: argtys_extra += (kwtys['neighborhood'],)", "specific StencilFunc. ''' def __init__(self, sf): self.stencilFunc = sf def", "array\" % (len(self.neighborhood), argtys[0].ndim)) argtys_extra = argtys sig_extra = \"\"", "effect other callsites. 
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes( self.kernel_ir, calltypes) #", "# have to add the index value with a call", "in block.body: if isinstance(stmt, ir.Return): ret_blocks.append(label) # If 1D array", "b) in kernel_copy.blocks.items(): stencil_ir.blocks[l] = b stencil_ir.blocks[new_label] = block stencil_ir.blocks[label]", "\",\".join(self.kernel_ir.arg_names), sig_extra)) exec(dummy_text) in globals(), locals() dummy_func = eval(\"__numba_dummy_stencil\") sig", "other_array != first_arg: func_text += \",\" + other_array func_text +=", "ir.Expr) and stmt.value.op in ['setitem', 'static_setitem'] and stmt.value.value.name in kernel.arg_names)", "= ir_utils.get_unused_var_name(\"index\" + str(i), name_var_table) index_vars += [index_var_name] # Create", "a call to # slice_addition. if isinstance(stmt_index_var_typ, types.misc.SliceType): sa_var =", "types.npytypes.Array): raise NumbaValueError(\"The first argument to a stencil kernel must", "and replace them with a SetItem call of the value", "not None and len(self.neighborhood) != args[0].ndim): raise ValueError(\"{} dimensional neighborhood", "ValueError(\"Assignments to arrays passed to stencil \" \\ \"kernels is", "\" + out_init offset = 1 # Add the loop", "Return the call-site signature. \"\"\" if (self.neighborhood is not None", "blocks to the gufunc # function's IR. for (l, b)", "for a StencilFunc object in the input typing context. \"\"\"", "neighborhood. out_name = ir_utils.get_unused_var_name(\"out\", name_var_table) neighborhood_name = ir_utils.get_unused_var_name(\"neighborhood\", name_var_table) sig_extra", "out and neighborhood. out_name = ir_utils.get_unused_var_name(\"out\", name_var_table) neighborhood_name = ir_utils.get_unused_var_name(\"neighborhood\",", "unary - on that to get the positive offset in", "assignment. # 3) Get the IR of this new function.", "dimension because negative maximums would not cause us to #", "invalid. 
\"\"\" copy_calltypes = {} kernel_copy = ir.copy() kernel_copy.blocks =", "ir_utils.dump_blocks(self.kernel_ir.blocks) if not isinstance(argtys[0], types.npytypes.Array): raise NumbaValueError(\"The first argument to", "compiler.run_frontend(func) return StencilFunc(kernel_ir, mode, options) return decorated @lower_builtin(stencil) def stencil_dummy_lower(context,", "ret_blocks = [] for label, block in blocks.items(): scope =", "Python indexing. Returns the # computed size of the stencil", "func_text += \" \" # Put a sentinel in the", "self._targetctx.refresh() self._install_type(self._typingctx) self.neighborhood = self.options.get(\"neighborhood\") self._type_cache = {} self._lower_me =", "print(\"before replace sentinel kernel_copy\") ir_utils.dump_blocks(kernel_copy.blocks) # Search all the block", "0: raise NumbaValueError(\"Standard indexing requested for an array name \"", "function without specifying mode style if not isinstance(func_or_mode, str): mode", "= numpy_support.as_dtype( return_type.dtype).type.__name__ if \"cval\" in self.options: cval = self.options[\"cval\"]", "the index tuple. 
if isinstance(stmt_index_var_typ, types.ConstSized): one_index_typ = stmt_index_var_typ[dim] else:", "a copy of the calltypes because copy propagation applied to", "= sig.replace(pysig=utils.pysignature(dummy_func)) self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)]) self._type_cache[argtys_extra] = (sig, result, typemap,", "var in var_table.items(): if not name in reserved_names: new_var_dict[name] =", "calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) else:", "import ir as lir from numba.core import types, typing, utils,", "stencil_stub_last_label) print(\"before replace sentinel stencil_ir\") ir_utils.dump_blocks(stencil_ir.blocks) print(\"before replace sentinel kernel_copy\")", "np.isfinite(cval): # See if this is a string-repr numerical const,", "= max(kernel_copy.blocks.keys()) + 1 # Adjust ret_blocks to account for", "# Search all the block in the stencil outline for", "def slice_addition(the_slice, addend): \"\"\" Called by stencil in Python mode", "cval_as_str(cval), return_type_name) else: out_init =\"{} = np.zeros({}, dtype=np.{})\\n\".format( out_name, shape_name,", "\" \"not present in the stencil kernel definition.\") # Add", "for block in kernel.blocks.values(): scope = block.scope loc = block.loc", "current absolute location in index0. index_var = ir.Var(scope, index_names[0], loc)", "where # elements outside the bounds of the input array", "a getitem from the input array. if stmt.value.op == 'getitem':", "second and third # are iterated over in the loop", "\"\"\" A special type to hold stencil information for the", "te = index[i] if isinstance(te, ir.Var) and te.name in const_dict:", "NumbaValueError( \"Stencil index does not match array dimensionality.\") return (neighborhood,", "split block across __sentinel__ # A new block is allocated", "of this particular part of the index tuple. 
if isinstance(stmt_index_var_typ,", "te) neighborhood[i][1] = max(neighborhood[i][1], te) else: raise NumbaValueError( \"stencil kernel", "func_text += \"{} = 0\\n\".format(sentinel_name) func_text += \" return {}\\n\".format(out_name)", "self.id) # We will put a loop nest in the", "stencil array has some dimension \" \"smaller the same dimension", "new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) else: acc_call = ir.Expr.binop(operator.add, getitemvar, index_vars[dim], loc)", "the body of a loop, for which args aren't needed.", "in the IR to transition the accesses # in the", "_stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args): # Overall approach:", "set then use it if \"cval\" in self.options: cval =", "j in range(offset): func_text += \" \" # ranges[i][0] is", "value # index used in the kernel specification. neighborhood =", "loop body blocks to the gufunc # function's IR. for", "kernel size so as not to try to compute elements", "loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) tuple_call = ir.Expr.build_tuple(ind_stencils, loc) new_body.append(ir.Assign(tuple_call, s_index_var,", "relative indexing, not standard indexing.\") if len(set(standard_indexed) - set(kernel_copy.arg_names)) !=", "and stmt.value.op in ['getitem', 'static_getitem'] and stmt.value.value.name in kernel.arg_names and", "(real_ret, typemap, calltypes) = self.get_return_type(argtys) sig = signature(real_ret, *argtys_extra) dummy_text", "ir_utils.remove_args(kernel_copy.blocks) first_arg = kernel_copy.arg_names[0] in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap) name_var_table", "\" + out_init else: # result is present, if cval", "isinstance(index, list): for i in range(len(index)): te = index[i] if", "numba.core.imputils import lower_builtin from numba.core.extending import register_jitable from numba.core.errors import", "out_name): \"\"\" Find return statements in the IR and replace", "== 0: raise NumbaValueError(\"Stencil 
kernel with no accesses to \"", "be either int # or variable. In the latter case", "ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc) const_index_vars = [] ind_stencils", "name_var_table: raise NumbaValueError(\"Cannot use the reserved word 'out' in stencil", "the stencil might take # multiple input arrays with different", "the new function. for i in range(the_array.ndim): for j in", "function. # 4) Split the block containing the sentinel assignment", "mode = 'constant' # default style func = func_or_mode else:", "1: print(\"new_stencil_param_types\", new_stencil_param_types) ir_utils.dump_blocks(stencil_ir.blocks) # Compile the combined stencil function", "\"\"\" Implement AbstractTemplate.generic() for the typing class built by StencilFunc._install_type().", "= 'constant' # default style func = func_or_mode else: mode", "# a return in the stencil kernel to the block", "kernel into the result array. Returns the block labels that", "the original IR invalid. \"\"\" copy_calltypes = {} kernel_copy =", "dimension whose # use is precluded. # ranges[i][1] is the", "const_dict\", stmt.target.name, stmt.value.value) # Remember consts for use later. 
const_dict[stmt.target.name]", "self._type_cache[argtys] new_func = self._stencil_wrapper(result, sigret, return_type, typemap, calltypes, *argtys) return", "new_block return (kernel_copy, copy_calltypes) def _stencil_wrapper(self, result, sigret, return_type, typemap,", "in the IR and replace them with a SetItem call", "for stmt in block.body: if isinstance(stmt, ir.Return): ret_blocks.append(label) # If", "2) The but of the loop nest in this new", "a.shape # We need literal_unroll here because the stencil might", "ir.Var(scope, out_name, loc) ivar = ir.Var(scope, index_vars[0], loc) new_body.append(ir.SetItem(rvar, ivar,", "len(neighborhood) != ndim: raise ValueError(\"%d dimensional neighborhood specified for %d", "const_index_vars += [tmpvar] index_var = ir.Var(scope, index_names[dim], loc) index_vars +=", "type cache to find if result array is passed (_,", "as specified by the user into one that includes each", "1: print(\"name_var_table\", name_var_table, sentinel_name) the_array = args[0] if config.DEBUG_ARRAY_OPT >=", "\"{} = 0\\n\".format(sentinel_name) func_text += \" return {}\\n\".format(out_name) if config.DEBUG_ARRAY_OPT", "arrays.\") for index in kernel_consts: if isinstance(index, tuple) or isinstance(index,", "= block.loc new_body = [] for stmt in block.body: if", "kernel from relative to regular Python indexing. Returns the #", "array names. standard_indexed = self.options.get(\"standard_indexing\", []) if first_arg in standard_indexed:", "for %d \" \\ \"dimensional input array\" % (len(neighborhood), ndim))", "relatively indexed # arrays. kernel_size, relatively_indexed = self.add_indices_to_kernel( kernel_copy, index_vars,", "mode self.options = options self.kws = [] # remember original", "is indexed with a slice then we # have to", "+= (kwtys['out'],) sig_extra += \", out=None\" result = kwtys['out'] if", "# assignment. Insert the stencil kernel IR into the stencil", "function. 
ranges = [] for i in range(the_array.ndim): if isinstance(kernel_size[i][0],", "will raise an error if any of the relatively indexed", "in range(offset): func_text += \" \" # Put a sentinel", "ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func = numba.njit(slice_addition) sa_func_typ = types.functions.Dispatcher(sa_func) typemap[sa_var.name]", "*args): ashape = a.shape # We need literal_unroll here because", "array has some dimension \" \"smaller the same dimension in", "stmt.value.op in ['setitem', 'static_setitem'] and stmt.value.value.name in kernel.arg_names) or (isinstance(stmt,", "copy_calltypes) if self.neighborhood is None: self.neighborhood = kernel_size if config.DEBUG_ARRAY_OPT", "def add_indices_to_kernel(self, kernel, index_names, ndim, neighborhood, standard_indexed, typemap, calltypes): \"\"\"", "# the current absolute location in index0. index_var = ir.Var(scope,", "For each statement in each block... for stmt in ir.blocks[block_label].body:", "+ str(i), name_var_table) index_vars += [index_var_name] # Create extra signature", "We found a getitem from the input array. if stmt.value.op", "combined stencil function with the replaced loop # body in", "variable for each dimension. index0, index1, ... index_vars = []", "new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,s_index_var,loc), stmt.target,loc)) else: new_body.append(stmt) block.body = new_body if need_to_calc_kernel:", "range(offset): func_text += \" \" # Put a sentinel in", "None, real_ret, typemap, calltypes, *array_types_full) if result is None: return", "relatively indexed # arrays are of different size than the", "the stencil_ir. 
kernel_copy.blocks = ir_utils.add_offset_to_labels( kernel_copy.blocks, stencil_stub_last_label) new_label = max(kernel_copy.blocks.keys())", "[] ind_stencils = [] stmt_index_var_typ = typemap[stmt_index_var.name] # Same idea", "generic=self._type_me)) typingctx.insert_user_function(self, _ty_cls) def compile_for_argtys(self, argtys, kwtys, return_type, sigret): #", "type('StencilFuncTyping_' + str(self.id), (AbstractTemplate,), dict(key=self, generic=self._type_me)) typingctx.insert_user_function(self, _ty_cls) def compile_for_argtys(self,", "if cval is set then use it if \"cval\" in", "variable as part of the getitem calls. So, in effect", "< 0: return \"-np.inf\" else: return \"np.inf\" else: return str(cval)", "labels that contained return statements. \"\"\" ret_blocks = [] for", "used in the i'th dimension # but minimum's greater than", "# called on function without specifying mode style if not", "= ir_utils.get_unused_var_name(\"neighborhood\", name_var_table) sig_extra = \"\" if result is not", "\"cval type does not match stencil return type.\" raise NumbaValueError(msg)", "the value \"returned\" by the kernel into the result array.", "if any of the relatively indexed # arrays are of", "mode to add the loop index to a user-specified slice.", "ir.Var(scope, one_var, loc) var_index_vars += [index_var] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var", "with different types that are not compatible # (e.g. values", "# dimension in the input array. Here we create the", "return type.\" raise NumbaValueError(msg) out_init =\"{} = np.full({}, {}, dtype=np.{})\\n\".format(", "= b stencil_ir.blocks[new_label] = block stencil_ir.blocks[label] = prev_block # Add", "compatible # (e.g. 
values as float[:] and flags as bool[:])", "self.kernel_ir, argtys, None, {}) if isinstance(return_type, types.npytypes.Array): raise NumbaValueError( \"Stencil", "offset += 1 for j in range(offset): func_text += \"", "# Store the index used after looking up the variable", "self._install_type(self._typingctx) self.neighborhood = self.options.get(\"neighborhood\") self._type_cache = {} self._lower_me = StencilFuncLowerer(self)", "relatively indexed arrays, add a call to # a function", "# split block across __sentinel__ # A new block is", "numba.core import types, typing, utils, ir, config, ir_utils, registry from", "ir_utils.mk_unique_var(\"stencil_index\") tmpvar = ir.Var(scope, tmpname, loc) stmt_index_var_typ = typemap[stmt_index_var.name] #", "wouldn't fail) for arg in literal_unroll(args): if a.ndim != arg.ndim:", "array. shape_name = ir_utils.get_unused_var_name(\"full_shape\", name_var_table) func_text += \" {} =", "+= \" {} = {}.shape\\n\".format(shape_name, first_arg) # Converts cval to", "NumbaValueError(\"The first argument to a stencil kernel must \" \"use", "original # calltypes then add the type associated with this", "real index and is computed by # adding the relative", "(e.g. values as float[:] and flags as bool[:]) # When", "argtys[0].ndim)) argtys_extra = argtys sig_extra = \"\" result = None", "other callsites. (kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes( self.kernel_ir, calltypes) # The", "the maximum absolute value # index used in the kernel", "= 0 def __init__(self, kernel_ir, mode, options): self.id = type(self).id_counter", "input array.\") from numba.core import typed_passes typemap, return_type, calltypes, _", "in each block... 
for stmt in ir.blocks[block_label].body: # Copy the", "in kwargs: result = kwargs['out'] rdtype = result.dtype rttype =", "for CPU context currently self._typingctx = registry.cpu_target.typing_context self._targetctx = registry.cpu_target.target_context", "sig.replace(pysig=utils.pysignature(dummy_func)) self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)]) self._type_cache[argtys_extra] = (sig, result, typemap, calltypes)", "its calltype information. We need a copy of the calltypes", "the getitem calls. So, in effect array[-1] becomes array[index0-1]. \"\"\"", "options: if option not in [\"cval\", \"standard_indexing\", \"neighborhood\"]: raise ValueError(\"Unknown", "import NumbaValueError from numba.misc.special import literal_unroll import numba import operator", "1:] # But the current block gets a new label.", "ret_blocks = [x + stencil_stub_last_label for x in ret_blocks] if", "the type of this particular part of the index tuple.", "from numba.core import typed_passes typemap, return_type, calltypes, _ = typed_passes.type_inference_stage(", "result, typemap, calltypes) = self._type_cache[argtys] new_func = self._stencil_wrapper(result, sigret, return_type,", "ir_utils.dump_blocks(kernel.blocks) if neighborhood is None: need_to_calc_kernel = True else: need_to_calc_kernel", ">= 1: print(\"After add_indices_to_kernel\") ir_utils.dump_blocks(kernel_copy.blocks) # The return in the", "i) ranges.append((lo, hi)) # If there are more than one", "an array name \" \"not present in the stencil kernel", "self._targetctx, self.kernel_ir, argtys, None, {}) if isinstance(return_type, types.npytypes.Array): raise NumbaValueError(", "ir as lir from numba.core import types, typing, utils, ir,", "labels in the kernel copy so they are guaranteed unique", "original statement is in the original # calltypes then add", "(float[:], bool[:]) wouldn't fail) for arg in literal_unroll(args): if a.ndim", "block maintains the current block # label. 
prev_block = ir.Block(scope,", "first_arg in standard_indexed: raise NumbaValueError(\"The first argument to a stencil", "later. const_dict[stmt.target.name] = stmt.value.value if ((isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr)", "a tuple from the index ir.Var's. tuple_call = ir.Expr.build_tuple(var_index_vars, loc)", "value into # the array using the tuple index. si", "specifying mode style if not isinstance(func_or_mode, str): mode = 'constant'", "index_vars[0], loc) new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc)) else: # Convert the", "function IR # after label and variable renaming of the", "definition.\") # Add index variables to getitems in the IR", "+= \",\" + other_array func_text += \")\\n\" # Get the", "\" \"smaller the same dimension in the first \" \"stencil", "to allocate the array. if result is None: return_type_name =", "current block is used for statements after sentinel. block.body =", "generated function for each # dimension in the input array.", "index variable 'index0'. # tmpvar will hold the real index", "return_type, return_type.dtype, type(return_type.dtype), args) ir_utils.dump_blocks(kernel_copy.blocks) # We generate a Numba", "first \" \"stencil input.\") def slice_addition(the_slice, addend): \"\"\" Called by", "= ir_utils.add_offset_to_labels( kernel_copy.blocks, stencil_stub_last_label) new_label = max(kernel_copy.blocks.keys()) + 1 #", "StencilFunc. ''' def __init__(self, sf): self.stencilFunc = sf def __call__(self,", "ret_blocks.append(label) # If 1D array then avoid the tuple construction.", "match array dimensionality.\") return (neighborhood, relatively_indexed) def get_return_type(self, argtys): if", "'name') if stmt_index_var.name in tuple_table: kernel_consts += [tuple_table[stmt_index_var.name]] elif stmt_index_var.name", "argtys_extra in self._type_cache: (_sig, _, _, _) = self._type_cache[argtys_extra] return", "includes each dimension's index variable as part of the getitem", "the IR. 
We will # remove this sentinel assignment and", "built by StencilFunc._install_type(). Return the call-site signature. \"\"\" if (self.neighborhood", "raise_if_incompatible_array_sizes(a, *args): ashape = a.shape # We need literal_unroll here", "func_or_mode func = None for option in options: if option", "signature(real_ret, *argtys_extra) dummy_text = (\"def __numba_dummy_stencil({}{}):\\n pass\\n\".format( \",\".join(self.kernel_ir.arg_names), sig_extra)) exec(dummy_text)", "None) res = context.call_internal(builder, cres.fndesc, sig, args) context.add_linking_libs([cres.library]) return res", "+ [result_type]) else: result = None array_types = tuple([typing.typeof.typeof(x) for", "cache first if argtys_extra in self._type_cache: (_sig, _, _, _)", "the reserved word 'out' in stencil kernels.\") sentinel_name = ir_utils.get_unused_var_name(\"__sentinel__\",", "statements. \"\"\" ret_blocks = [] for label, block in blocks.items():", "the input array. Those loop nests use the # computed", "if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Const)): if config.DEBUG_ARRAY_OPT >= 1:", "return statements in the IR and replace them with a", "function. 
for i in range(the_array.ndim): for j in range(offset): func_text", "for j in range(offset): func_text += \" \" # ranges[i][0]", "= result.dtype rttype = numpy_support.from_dtype(rdtype) result_type = types.npytypes.Array(rttype, result.ndim, numpy_support.map_layout(result))", "+= [index_var] tmpname = ir_utils.mk_unique_var(\"ind_stencil_index\") tmpvar = ir.Var(scope, tmpname, loc)", "NumbaValueError(msg) out_init =\"{} = np.full({}, {}, dtype=np.{})\\n\".format( out_name, shape_name, cval_as_str(cval),", "each dimension, which could be either int # or variable.", "sig_extra = \"\" result = None if 'out' in kwtys:", "!= argtys[0].ndim): raise NumbaValueError(\"%d dimensional neighborhood specified \" \"for %d", "out_init else: # result is present, if cval is set", "argtys[0].ndim): raise NumbaValueError(\"%d dimensional neighborhood specified \" \"for %d dimensional", "in the stencil kernel to the block # containing statements", "ir_utils.dump_blocks(stencil_ir.blocks) # Compile the combined stencil function with the replaced", "registry from numba.core.typing.templates import (CallableTemplate, signature, infer_global, AbstractTemplate) from numba.core.imputils", "tmpvar, loc), stmt.target, loc)) else: acc_call = ir.Expr.binop(operator.add, stmt_index_var, index_var,", "calltypes, *array_types_full) if result is None: return new_func.entry_point(*args) else: return", "bool[:]) wouldn't fail) for arg in literal_unroll(args): if a.ndim !=", "any entry in the array from being used. func_text +=", "A special type to hold stencil information for the IR.", "to try to compute elements where # elements outside the", "input arrays with different types that are not compatible #", "the block labels that contained return statements. \"\"\" ret_blocks =", "# If 1D array then avoid the tuple construction. if", "else: result = None array_types = tuple([typing.typeof.typeof(x) for x in", "the minimum labelled block of # the parfor body. 
prev_block.append(ir.Jump(body_first_label,", "s_index_var = ir.Var(scope, s_index_name, loc) const_index_vars = [] ind_stencils =", "array. if stmt.value.op == 'getitem': stmt_index_var = stmt.value.index else: stmt_index_var", "indexing, not standard indexing.\") if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:", "args) context.add_linking_libs([cres.library]) return res @register_jitable def raise_if_incompatible_array_sizes(a, *args): ashape =", "ndim, neighborhood) ir_utils.dump_blocks(kernel.blocks) if neighborhood is None: need_to_calc_kernel = True", "\"\"\" id_counter = 0 def __init__(self, kernel_ir, mode, options): self.id", "False if len(neighborhood) != ndim: raise ValueError(\"%d dimensional neighborhood specified", "after looking up the variable in # the const dictionary.", "config.DEBUG_ARRAY_OPT >= 1: print(\"get_return_type\", argtys) ir_utils.dump_blocks(self.kernel_ir.blocks) if not isinstance(argtys[0], types.npytypes.Array):", "if \"cval\" in self.options: cval = self.options[\"cval\"] cval_ty = typing.typeof.typeof(cval)", "kernel body becomes the body of a loop, for which", "'index0'. # tmpvar will hold the real index and is", "current block gets a new label. body_first_label = min(kernel_copy.blocks.keys()) #", "across the dimensions of the input array. Those loop nests", "outline for the sentinel. for label, block in stencil_ir.blocks.items(): for", "we do # unary - on that to get the", "[x + stencil_stub_last_label for x in ret_blocks] if config.DEBUG_ARRAY_OPT >=", "out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap) name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks) ir_utils.apply_copy_propagate( kernel_copy.blocks, in_cps,", "loc) # Build a tuple from the index ir.Var's. 
tuple_call", "= ir_utils.mk_unique_var(\"stencil_index\") tmpvar = ir.Var(scope, tmpname, loc) stmt_index_var_typ = typemap[stmt_index_var.name]", "kernel_ir, mode, options): self.id = type(self).id_counter type(self).id_counter += 1 self.kernel_ir", "then we # have to add the index value with", "sig, args) context.add_linking_libs([cres.library]) return res @register_jitable def raise_if_incompatible_array_sizes(a, *args): ashape", "used for statements after sentinel. block.body = block.body[i + 1:]", "1: print(\"_stencil_wrapper\", return_type, return_type.dtype, type(return_type.dtype), args) ir_utils.dump_blocks(kernel_copy.blocks) # We generate", "= ir.copy() kernel_copy.blocks = {} # For each block... for", "maximum index # in this dimension because negative maximums would", "loc)) else: index_vars = [] sum_results = [] s_index_name =", "kernel IR into existence. # Copy the kernel so that", "= {} kernel_consts = [] if config.DEBUG_ARRAY_OPT >= 1: print(\"add_indices_to_kernel\",", "= ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ,", "# Get a list of the standard indexed array names.", "stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1 # Shift labels in the", "0\\n\".format(sentinel_name) func_text += \" return {}\\n\".format(out_name) if config.DEBUG_ARRAY_OPT >= 1:", "in ir.blocks[block_label].body: # Copy the statement to the new copy", "precluded. # ranges[i][1] is the maximum of 0 and the", "else: stmt_index_var = stmt.value.index_var # allow static_getitem since rewrite passes", "the IR. 
\"\"\" id_counter = 0 def __init__(self, kernel_ir, mode,", "index_vars = [] sum_results = [] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var", "nests use the # computed stencil kernel size so as", "has some dimension \" \"smaller the same dimension in the", "= ir_utils.copy_propagate(kernel_copy.blocks, typemap) name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks) ir_utils.apply_copy_propagate( kernel_copy.blocks, in_cps, name_var_table,", "body. prev_block.append(ir.Jump(body_first_label, loc)) # Add all the parfor loop body", "stencil_stub_last_label) new_label = max(kernel_copy.blocks.keys()) + 1 # Adjust ret_blocks to", "StencilFunc._install_type(). Return the call-site signature. \"\"\" if (self.neighborhood is not", "take # multiple input arrays with different types that are", "loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) rvar = ir.Var(scope, out_name, loc) #", "ir.Var(scope, s_index_name, loc) const_index_vars = [] ind_stencils = [] stmt_index_var_typ", "specification. neighborhood = [[0,0] for _ in range(ndim)] if len(kernel_consts)", "if isinstance(te, int): neighborhood[i][0] = min(neighborhood[i][0], te) neighborhood[i][1] = max(neighborhood[i][1],", "We found the sentinel assignment. loc = inst.loc scope =", "wrapper def _stencil(mode, options): if mode != 'constant': raise ValueError(\"Unsupported", "dimensions of the input array. Those loop nests use the", "typemap, return_type, calltypes, _ = typed_passes.type_inference_stage( self._typingctx, self._targetctx, self.kernel_ir, argtys,", "in the stencil_ir. 
kernel_copy.blocks = ir_utils.add_offset_to_labels( kernel_copy.blocks, stencil_stub_last_label) new_label =", "Split the block containing the sentinel assignment and remove the", "loc), stmt.target, loc)) else: index_vars = [] sum_results = []", "self.id = type(self).id_counter type(self).id_counter += 1 self.kernel_ir = kernel_ir self.mode", "neighborhood=None\" # look in the type cache first if argtys_extra", "= numpy_support.from_dtype(rdtype) result_type = types.npytypes.Array(rttype, result.ndim, numpy_support.map_layout(result)) array_types = tuple([typing.typeof.typeof(x)", "mode = func_or_mode func = None for option in options:", "self._lower_me = StencilFuncLowerer(self) def replace_return_with_setitem(self, blocks, index_vars, out_name): \"\"\" Find", "absolute value # index used in the kernel specification. neighborhood", "+= \", out=None\" result = kwtys['out'] if 'neighborhood' in kwtys:", "else: new_body.append(stmt) block.body = new_body if need_to_calc_kernel: # Find the", "\"\"\" _ty_cls = type('StencilFuncTyping_' + str(self.id), (AbstractTemplate,), dict(key=self, generic=self._type_me)) typingctx.insert_user_function(self,", "use it if \"cval\" in self.options: cval = self.options[\"cval\"] cval_ty", "if config.DEBUG_ARRAY_OPT >= 1: print(\"name_var_table\", name_var_table, sentinel_name) the_array = args[0]", "for that # particular point in the iteration space. ret_blocks", "in range(the_array.ndim): if isinstance(kernel_size[i][0], int): lo = kernel_size[i][0] hi =", "body in it. ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) new_func = compiler.compile_ir( self._typingctx, self._targetctx, stencil_ir,", "# Create extra signature for out and neighborhood. out_name =", "None: return wrapper(func) return wrapper def _stencil(mode, options): if mode", "a tuple that can # index the array. 
for dim", "out_init =\"{} = np.full({}, {}, dtype=np.{})\\n\".format( out_name, shape_name, cval_as_str(cval), return_type_name)", "= kernel_size if config.DEBUG_ARRAY_OPT >= 1: print(\"After add_indices_to_kernel\") ir_utils.dump_blocks(kernel_copy.blocks) #", "x in args]) array_types_full = tuple([typing.typeof.typeof(x) for x in args]", "type(return_type.dtype), args) ir_utils.dump_blocks(kernel_copy.blocks) # We generate a Numba function to", "account for addition of the offset. ret_blocks = [x +", "exec(func_text) in globals(), locals() stencil_func = eval(stencil_func_name) if sigret is", "with any labels in the stencil_ir. kernel_copy.blocks = ir_utils.add_offset_to_labels( kernel_copy.blocks,", "after sentinel. block.body = block.body[i + 1:] # But the", "numba.core.typing.templates import (CallableTemplate, signature, infer_global, AbstractTemplate) from numba.core.imputils import lower_builtin", "array would be needed. # 2) The but of the", "this callsite # won't effect other callsites. (kernel_copy, copy_calltypes) =", "[index_var] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc) #", "loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,s_index_var,loc), stmt.target,loc)) else: new_body.append(stmt) block.body = new_body if", "ir.Var's. tuple_call = ir.Expr.build_tuple(var_index_vars, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) rvar =", "ValueError(\"Unsupported mode style \" + mode) def decorated(func): from numba.core", "the real index and is computed by # adding the", "new_body.append(ir.Assign(ir.Const(dim, loc), tmpvar, loc)) const_index_vars += [tmpvar] index_var = ir.Var(scope,", "# Add the loop nests to the new function. for", "the sentinel assignment. 
loc = inst.loc scope = block.scope #", "blocks that previously contained # a return in the stencil", "config.DEBUG_ARRAY_OPT >= 1: print(\"After add_indices_to_kernel\") ir_utils.dump_blocks(kernel_copy.blocks) # The return in", "tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else: acc_call", "1 # Shift labels in the kernel copy so they", "variable renaming of the stencil kernel IR to prevent #", "the IR of this new function. # 4) Split the", "stmt_index_var.name in const_dict: kernel_consts += [const_dict[stmt_index_var.name]] else: raise NumbaValueError(\"stencil kernel", "= ir.Expr.binop(operator.add, stmt_index_var, index_var, loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,", "from numba.core import compiler stencil_ir = compiler.run_frontend(stencil_func) ir_utils.remove_dels(stencil_ir.blocks) # rename", "of the standard indexed array names. standard_indexed = self.options.get(\"standard_indexing\", [])", "stmt_index_var = stmt.value.index else: stmt_index_var = stmt.value.index_var # allow static_getitem", "the tuple index. si = ir.SetItem(rvar, s_index_var, stmt.value, loc) new_body.append(si)", "any of the relatively indexed # arrays are of different", "= \"\" result = None if 'out' in kwtys: argtys_extra", "loc)) else: acc_call = ir.Expr.binop(operator.add, getitemvar, index_vars[dim], loc) new_body.append(ir.Assign(acc_call, tmpvar,", "labels in the stencil_ir. kernel_copy.blocks = ir_utils.add_offset_to_labels( kernel_copy.blocks, stencil_stub_last_label) new_label", "stmt in calltypes: copy_calltypes[scopy] = calltypes[stmt] kernel_copy.blocks[block_label] = new_block return", "Get the IR for the newly created stencil function. 
from", "We will # remove this sentinel assignment and replace it", "arrays are given, the second and third # are iterated", "for stmt in block.body: if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Const)):", "without specifying mode style if not isinstance(func_or_mode, str): mode =", "config.DEBUG_ARRAY_OPT >= 1: print(\"_stencil_wrapper\", return_type, return_type.dtype, type(return_type.dtype), args) ir_utils.dump_blocks(kernel_copy.blocks) #", "te) else: raise NumbaValueError( \"stencil kernel index is not constant,\"", "have same number \" \" of dimensions as the first", "index0. index_var = ir.Var(scope, index_names[0], loc) tmpname = ir_utils.mk_unique_var(\"stencil_index\") tmpvar", "found a getitem from the input array. if stmt.value.op ==", "# unique stencil function name, the parameters to the stencil", "{} kernel_copy = ir.copy() kernel_copy.blocks = {} # For each", "block.loc new_body = [] for stmt in block.body: if (isinstance(stmt,", "out_name, neighborhood_name, shape_name] + kernel_copy.arg_names + index_vars) for name, var", "var_index_vars += [index_var] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name,", "that to get the positive offset in this dimension whose", "are of different size than the first input array. if", "with this # statement to the calltypes copy. scopy =", "= ir.Block(scope, loc) prev_block.body = block.body[:i] # The current block", "IR. for (l, b) in kernel_copy.blocks.items(): stencil_ir.blocks[l] = b stencil_ir.blocks[new_label]", "StencilFunc(kernel_ir, mode, options) return decorated @lower_builtin(stencil) def stencil_dummy_lower(context, builder, sig,", "tuple index. si = ir.SetItem(rvar, s_index_var, stmt.value, loc) new_body.append(si) else:", "in index0. 
index_var = ir.Var(scope, index_names[0], loc) tmpname = ir_utils.mk_unique_var(\"stencil_index\")", "the parfor loop body blocks to the gufunc # function's", "self.options.get(\"standard_indexing\", []) if first_arg in standard_indexed: raise NumbaValueError(\"The first argument", "the type cache to find if result array is passed", "Create a copy of a given IR along with its", "label. body_first_label = min(kernel_copy.blocks.keys()) # The previous block jumps to", "inst.target.name == sentinel_name): # We found the sentinel assignment. loc", "indexing requested for an array name \" \"not present in", "None, {}) if isinstance(return_type, types.npytypes.Array): raise NumbaValueError( \"Stencil kernel must", "# in this dimension because negative maximums would not cause", "sentinel. block.body = block.body[i + 1:] # But the current", "s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc) const_index_vars =", "are iterated over in the loop below. Without literal_unroll, their", "block.body = block.body[i + 1:] # But the current block", "stencil_ir.blocks[l] = b stencil_ir.blocks[new_label] = block stencil_ir.blocks[label] = prev_block #", "(\"for {} in range(-min(0,{}),\" \"{}[{}]-max(0,{})):\\n\").format( index_vars[i], ranges[i][0], shape_name, i, ranges[i][1])", "kernel definition.\") # Add index variables to getitems in the", "have to extract # individual elements out of the tuple", "and is computed by # adding the relative offset in", "from numba.np import numpy_support class StencilFuncLowerer(object): '''Callable class responsible for", "for the statements prior to the # sentinel but the", "variables into # ir.Var's. var_index_vars = [] for one_var in", "[tuple_table[stmt_index_var.name]] elif stmt_index_var.name in const_dict: kernel_consts += [const_dict[stmt_index_var.name]] else: raise", "to extract # individual elements out of the tuple indexing", "int # or variable. 
In the latter case we'll use", "input array\".format( len(self.neighborhood), args[0].ndim)) if 'out' in kwargs: result =", "standard_indexed, typemap, copy_calltypes) if self.neighborhood is None: self.neighborhood = kernel_size", "index variable as part of the getitem calls. So, in", "\" \" # Put a sentinel in the code so", "ir.Expr.binop(operator.add, getitemvar, index_vars[dim], loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) tuple_call = ir.Expr.build_tuple(ind_stencils,", "and installs a typing class for a StencilFunc object in", "# Copy the statement to the new copy of the", "standard_indexed): # We found a getitem from the input array.", "_stencil(mode, options) if func is not None: return wrapper(func) return", "0: raise NumbaValueError(\"Stencil kernel with no accesses to \" \"relatively", "to a stencil kernel must \" \"use relative indexing, not", "is not None: pysig = utils.pysignature(stencil_func) sigret.pysig = pysig #", "= stmt_index_var_typ[dim] else: one_index_typ = stmt_index_var_typ[:] # If the array", "return_type_name) else: out_init =\"{} = np.zeros({}, dtype=np.{})\\n\".format( out_name, shape_name, return_type_name)", "literal_unroll import numba import operator from numba.np import numpy_support class", "print(\"After replace_return_with_setitem\", ret_blocks) ir_utils.dump_blocks(kernel_copy.blocks) # Start to form the new", "is None: self.neighborhood = kernel_size if config.DEBUG_ARRAY_OPT >= 1: print(\"After", ">= 1: print(\"add_indices_to_kernel\", ndim, neighborhood) ir_utils.dump_blocks(kernel.blocks) if neighborhood is None:", "# argument to the function. ranges = [] for i", "to add the index value with a call to #", "the new stencil function into existence. 
exec(func_text) in globals(), locals()", "raise ValueError(\"Unsupported mode style \" + mode) def decorated(func): from", "the relative offset in stmt.value.index to # the current absolute", "out_name, loc) # Write the return statements original value into", "bool[:]) (Just (float[:], bool[:]) wouldn't fail) for arg in literal_unroll(args):", "kernel. This function definition includes a # unique stencil function", "there are more than one relatively indexed arrays, add a", "sentinel kernel_copy\") ir_utils.dump_blocks(kernel_copy.blocks) # Search all the block in the", "is the maximum of 0 and the observed maximum index", "stmt_index_var, index_var, loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc),", "conflicts with the stencil function IR. # 5) Compile the", "given, the second and third # are iterated over in", "options) if func is not None: return wrapper(func) return wrapper", "the input array would be needed. # 2) The but", "block in the stencil outline for the sentinel. for label,", "NumbaValueError( \"Stencil kernel must return a scalar and not a", "3) Get the IR of this new function. # 4)", "return str(cval) # If we have to allocate the output", "in the array from being used. func_text += (\"for {}", "in the array. # So, take the minimum of 0", "to compute elements where # elements outside the bounds of", "'getitem': stmt_index_var = stmt.value.index else: stmt_index_var = stmt.value.index_var # allow", "= ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func = numba.njit(slice_addition) sa_func_typ = types.functions.Dispatcher(sa_func)", "stmt.value.value) # Remember consts for use later. 
const_dict[stmt.target.name] = stmt.value.value", "tuple indexing # expression and add the corresponding index variable", "\" \"be the primary input array.\") from numba.core import typed_passes", "the array is indexed with a slice then we #", "Adjust ret_blocks to account for addition of the offset. ret_blocks", "to them and then reconstitute as a tuple that can", "float[:] and flags as bool[:]) # When more than three", "as above but you have to extract # individual elements", "... index_vars = [] for i in range(the_array.ndim): index_var_name =", "slice_addition. if isinstance(one_index_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func", "have to allocate the output array (the out argument was", "self.options = options self.kws = [] # remember original kws", "NumbaValueError(msg) out_init = \"{}[:] = {}\\n\".format(out_name, cval_as_str(cval)) func_text += \"", "\"{}[{}]-max(0,{})):\\n\").format( index_vars[i], ranges[i][0], shape_name, i, ranges[i][1]) offset += 1 for", "one_var, loc) var_index_vars += [index_var] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var =", "ir.Var(scope, tmpname, loc) ind_stencils += [tmpvar] getitemname = ir_utils.mk_unique_var(\"getitem\") getitemvar", "enumerate(block.body): if (isinstance( inst, ir.Assign) and inst.target.name == sentinel_name): #", "you have to extract # individual elements out of the", "raise ValueError(\"{} dimensional neighborhood specified for {} \" \"dimensional input", "kwargs) (real_ret, typemap, calltypes) = self.get_return_type(array_types) new_func = self._stencil_wrapper(result, None,", "allow static_getitem since rewrite passes are applied #raise ValueError(\"Unexpected static_getitem", "would be needed. 
# 2) The but of the loop", "match stencil return type.\" raise NumbaValueError(msg) out_init = \"{}[:] =", "total arrays are given, the second and third # are", "loc) new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (),", "= stmt_index_var_typ[:] # If the array is indexed with a", "effect array[-1] becomes array[index0-1]. \"\"\" const_dict = {} kernel_consts =", "this # statement to the calltypes copy. scopy = copy.deepcopy(stmt)", "None: self.neighborhood = kernel_size if config.DEBUG_ARRAY_OPT >= 1: print(\"After add_indices_to_kernel\")", "particular point in the iteration space. ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks, index_vars,", "= types.functions.Dispatcher(sa_func) typemap[sa_var.name] = sa_func_typ g_sa = ir.Global(\"slice_addition\", sa_func, loc)", "the stencil kernel to the block # containing statements after", "calltypes) = self._type_cache[argtys] new_func = self._stencil_wrapper(result, sigret, return_type, typemap, calltypes,", "a jump from all the blocks that previously contained #", "ValueError(\"Unknown stencil option \" + option) wrapper = _stencil(mode, options)", "will execute the stencil kernel. This function definition includes a", "func = None for option in options: if option not", "stmt.value.value.name in kernel.arg_names) or (isinstance(stmt, ir.SetItem) and stmt.target.name in kernel.arg_names)):", "ir.Var(scope, out_name, loc) # Write the return statements original value", "the loop nests to the new function. for i in", "IR to prevent # conflicts with the stencil function IR.", "if stmt in calltypes: copy_calltypes[scopy] = calltypes[stmt] kernel_copy.blocks[block_label] = new_block", "into existence. exec(func_text) in globals(), locals() stencil_func = eval(stencil_func_name) if", "to the new copy of the kernel # and if", "a negative number (potentially -0). 
Then, we do # unary", "ir.Assign) and isinstance(stmt.value, ir.Const)): if config.DEBUG_ARRAY_OPT >= 1: print(\"remembering in", "or (isinstance(stmt, ir.SetItem) and stmt.target.name in kernel.arg_names)): raise ValueError(\"Assignments to", "index is not \" \"constant, 'neighborhood' option required\") if ndim", "definition for the stencil function # that will execute the", "{} \" \"dimensional input array\".format( len(self.neighborhood), args[0].ndim)) if 'out' in", "Here we create the name for # the index variable", "the_array.ndim, self.neighborhood, standard_indexed, typemap, copy_calltypes) if self.neighborhood is None: self.neighborhood", "for x in args] + [result_type]) else: result = None", "in stmt.value.index to # the current absolute location in index0.", "and len(self.neighborhood) != argtys[0].ndim): raise NumbaValueError(\"%d dimensional neighborhood specified \"", "loc)) slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc) calltypes[slice_addition_call] =", "the minimum index used in the i'th dimension # but", "name, the parameters to the stencil kernel, loop # nests", "# unary - on that to get the positive offset", "example failing signature without literal_unroll might be # (float[:], float[:],", "in this dimension because negative maximums would not cause us", "cval stencil decorator option # or np.zeros if they didn't", "that includes each dimension's index variable as part of the", "index_len = len(index) elif isinstance(index, int): neighborhood[0][0] = min(neighborhood[0][0], index)", "which could be either int # or variable. In the", "in kwtys: argtys_extra += (kwtys['neighborhood'],) sig_extra += \", neighborhood=None\" #", "np.zeros if they didn't to allocate the array. 
if result", "= ir_utils.get_unused_var_name(\"__sentinel__\", name_var_table) if config.DEBUG_ARRAY_OPT >= 1: print(\"name_var_table\", name_var_table, sentinel_name)", "((isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op in ['setitem', 'static_setitem']", "== 1: # Single dimension always has index variable 'index0'.", "if return_type.dtype != typing.typeof.typeof(cval): msg = \"cval type does not", "ir.Expr.getitem(stmt_index_var, const_index_vars[dim], loc) new_body.append(ir.Assign(getitemcall, getitemvar, loc)) # Get the type", "[] for label, block in blocks.items(): scope = block.scope loc", "to find if result array is passed (_, result, typemap,", "set(kernel_copy.arg_names)) != 0: raise NumbaValueError(\"Standard indexing requested for an array", "the newly created stencil function. from numba.core import compiler stencil_ir", "either int # or variable. In the latter case we'll", "StencilFuncLowerer(self) def replace_return_with_setitem(self, blocks, index_vars, out_name): \"\"\" Find return statements", "if config.DEBUG_ARRAY_OPT >= 1: print(\"After replace_return_with_setitem\", ret_blocks) ir_utils.dump_blocks(kernel_copy.blocks) # Start", "in the loop below. Without literal_unroll, their # types have", "function IR + stencil kernel IR into existence. # Copy", "1: print(\"After add_indices_to_kernel\") ir_utils.dump_blocks(kernel_copy.blocks) # The return in the stencil", "+ out_init else: # result is present, if cval is", "argument to the function. ranges = [] for i in", "the first input array. shape_name = ir_utils.get_unused_var_name(\"full_shape\", name_var_table) func_text +=", "name for # the index variable for each dimension. 
index0,", "the user into one that includes each dimension's index variable", "name_var_table) index_vars += [index_var_name] # Create extra signature for out", "for i, inst in enumerate(block.body): if (isinstance( inst, ir.Assign) and", "{}\\n\".format(out_name, cval_as_str(cval)) func_text += \" \" + out_init offset =", "object in the input typing context. \"\"\" _ty_cls = type('StencilFuncTyping_'", "+ index_vars) for name, var in var_table.items(): if not name", "values as float[:] and flags as bool[:]) # When more", "argument was not used) # then us numpy.full if the", "stencil kernel becomes a setitem for that # particular point", "NumbaValueError(\"The first argument to a stencil kernel must \" \"be", "= max(stencil_ir.blocks.keys()) + 1 # Shift labels in the kernel", "ndim, neighborhood, standard_indexed, typemap, calltypes): \"\"\" Transforms the stencil kernel", "# Force the new stencil function into existence. exec(func_text) in", "dimension. index0, index1, ... index_vars = [] for i in", "the bounds of the input array would be needed. #", "# are iterated over in the loop below. Without literal_unroll,", "isinstance(kernel_size[i][0], int): lo = kernel_size[i][0] hi = kernel_size[i][1] else: lo", "not isinstance(argtys[0], types.npytypes.Array): raise NumbaValueError(\"The first argument to a stencil", "accesses # in the kernel from relative to regular Python", "of the stencil kernel IR to prevent # conflicts with", "to match. # An example failing signature without literal_unroll might", "idea as above but you have to extract # individual", "sentinel_name = ir_utils.get_unused_var_name(\"__sentinel__\", name_var_table) if config.DEBUG_ARRAY_OPT >= 1: print(\"name_var_table\", name_var_table,", "is not None: sig_extra += \", {}=None\".format(out_name) if \"neighborhood\" in", "and here # create the unique name of this function.", "class built by StencilFunc._install_type(). Return the call-site signature. 
\"\"\" if", "transition the accesses # in the kernel from relative to", "new_func def __call__(self, *args, **kwargs): if (self.neighborhood is not None", "dimensional neighborhood specified for {} \" \"dimensional input array\".format( len(self.neighborhood),", "in index_vars: index_var = ir.Var(scope, one_var, loc) var_index_vars += [index_var]", "return sig def copy_ir_with_calltypes(self, ir, calltypes): \"\"\" Create a copy", "in the stencil kernel definition.\") # Add index variables to", "current block # label. prev_block = ir.Block(scope, loc) prev_block.body =", "# ranges[i][0] is the minimum index used in the i'th", "stmt.value.index else: stmt_index_var = stmt.value.index_var # allow static_getitem since rewrite", "will hold the real index and is computed by #", "const_index_vars = [] ind_stencils = [] stmt_index_var_typ = typemap[stmt_index_var.name] #", "other_array func_text += \")\\n\" # Get the shape of the", "not name in reserved_names: new_var_dict[name] = ir_utils.mk_unique_var(name) ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict) stencil_stub_last_label", "block.scope loc = block.loc new_body = [] for stmt in", "then add the type associated with this # statement to", "flags as bool[:]) # When more than three total arrays", "stencil kernel as specified by the user into one that", "neighborhood = [[0,0] for _ in range(ndim)] if len(kernel_consts) ==", "= typed_passes.type_inference_stage( self._typingctx, self._targetctx, self.kernel_ir, argtys, None, {}) if isinstance(return_type,", "return \"np.nan\" elif np.isinf(cval): if cval < 0: return \"-np.inf\"", "if result is None: return new_func.entry_point(*args) else: return new_func.entry_point(*(args+(result,))) def", "dimensional neighborhood specified \" \"for %d dimensional input array\" %", "stmt in ir.blocks[block_label].body: # Copy the statement to the new", "# index used in the kernel specification. 
neighborhood = [[0,0]", "len(self.neighborhood), args[0].ndim)) if 'out' in kwargs: result = kwargs['out'] rdtype", "out_name, shape_name, return_type_name) func_text += \" \" + out_init else:", "= [] for i in range(the_array.ndim): index_var_name = ir_utils.get_unused_var_name(\"index\" +", "raise NumbaValueError(\"The first argument to a stencil kernel must \"", "calltypes, _ = typed_passes.type_inference_stage( self._typingctx, self._targetctx, self.kernel_ir, argtys, None, {})", ">= 1: print(\"new stencil func text\") print(func_text) # Force the", "Returns the block labels that contained return statements. \"\"\" ret_blocks", "(self.neighborhood is not None and len(self.neighborhood) != argtys[0].ndim): raise NumbaValueError(\"%d", "in tuple_table: kernel_consts += [tuple_table[stmt_index_var.name]] elif stmt_index_var.name in const_dict: kernel_consts", "a string constant def cval_as_str(cval): if not np.isfinite(cval): # See", "signature without literal_unroll might be # (float[:], float[:], bool[:]) (Just", "loc)) const_index_vars += [tmpvar] index_var = ir.Var(scope, index_names[dim], loc) index_vars", "name_var_table) neighborhood_name = ir_utils.get_unused_var_name(\"neighborhood\", name_var_table) sig_extra = \"\" if result", "sig_extra = \"\" if result is not None: sig_extra +=", "relative offset in stmt.value.index to # the current absolute location", "with a call to # slice_addition. 
if isinstance(stmt_index_var_typ, types.misc.SliceType): sa_var", "None, compiler.DEFAULT_FLAGS, {}) return new_func def __call__(self, *args, **kwargs): if", "stmt.value.index_var # allow static_getitem since rewrite passes are applied #raise", "if config.DEBUG_ARRAY_OPT >= 1: print(\"new stencil func text\") print(func_text) #", "<reponame>auderson/numba<filename>numba/stencils/stencil.py<gh_stars>1000+ # # Copyright (c) 2017 Intel Corporation # SPDX-License-Identifier:", "is allocated for the statements prior to the # sentinel", "index_vars) for name, var in var_table.items(): if not name in", "for the IR. \"\"\" id_counter = 0 def __init__(self, kernel_ir,", "in the kernel copy so they are guaranteed unique #", "first_arg for other_array in relatively_indexed: if other_array != first_arg: func_text", "sa_func_typ g_sa = ir.Global(\"slice_addition\", sa_func, loc) new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call", "the parfor body. prev_block.append(ir.Jump(body_first_label, loc)) # Add all the parfor", "name_var_table, typemap, copy_calltypes) if \"out\" in name_var_table: raise NumbaValueError(\"Cannot use", "the index ir.Var's. tuple_call = ir.Expr.build_tuple(var_index_vars, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc))", "stencil index.\") if index_len != ndim: raise NumbaValueError( \"Stencil index", "{}=None\".format(out_name) if \"neighborhood\" in dict(self.kws): sig_extra += \", {}=None\".format(neighborhood_name) #", "to the stencil kernel, loop # nests across the dimensions", "of different size than the first input array. if len(relatively_indexed)", "# slice_addition. if isinstance(stmt_index_var_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc)", "# use is precluded. # ranges[i][1] is the maximum of", "information for the IR. 
\"\"\" id_counter = 0 def __init__(self,", "acc_call = ir.Expr.binop(operator.add, stmt_index_var, index_var, loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) new_body.append(ir.Assign(", "bounds of the input array would be needed. # 2)", "the stencil kernel IR into the stencil function IR #", "ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict) stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1 # Shift labels", "the kernel # and if the original statement is in", "variables in stencil_ir afresh var_table = ir_utils.get_name_var_table(stencil_ir.blocks) new_var_dict = {}", "= {}\\n\".format(out_name, cval_as_str(cval)) func_text += \" \" + out_init offset", "[] for one_var in index_vars: index_var = ir.Var(scope, one_var, loc)", "into the stencil function IR # after label and variable", "= copy.deepcopy(stmt) new_block.body.append(scopy) if stmt in calltypes: copy_calltypes[scopy] = calltypes[stmt]", "kernel_copy.blocks[block_label] = new_block return (kernel_copy, copy_calltypes) def _stencil_wrapper(self, result, sigret,", "self.kernel_ir, calltypes) # The stencil kernel body becomes the body", "NumbaValueError(\"Cannot use the reserved word 'out' in stencil kernels.\") sentinel_name", "func_text += \")\\n\" # Get the shape of the first", "the replaced loop # body in it. 
ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) new_func =", "(len(neighborhood), ndim)) tuple_table = ir_utils.get_tuple_table(kernel.blocks) relatively_indexed = set() for block", "range(len(index)): te = index[i] if isinstance(te, ir.Var) and te.name in", "type(self).id_counter += 1 self.kernel_ir = kernel_ir self.mode = mode self.options", "= [] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc)", "= self.get_return_type(argtys) sig = signature(real_ret, *argtys_extra) dummy_text = (\"def __numba_dummy_stencil({}{}):\\n", "ValueError(\"Secondary stencil array does not have same number \" \"", "new_body return ret_blocks def add_indices_to_kernel(self, kernel, index_names, ndim, neighborhood, standard_indexed,", "= a.shape # We need literal_unroll here because the stencil", "func_text += \" \" # ranges[i][0] is the minimum index", "kernel_copy\") ir_utils.dump_blocks(kernel_copy.blocks) # Search all the block in the stencil", "i, inst in enumerate(block.body): if (isinstance( inst, ir.Assign) and inst.target.name", "in kernel.blocks.values(): scope = block.scope loc = block.loc new_body =", "AbstractTemplate) from numba.core.imputils import lower_builtin from numba.core.extending import register_jitable from", "( hex(id(the_array)).replace(\"-\", \"_\"), self.id) # We will put a loop", "array is indexed with a slice then we # have", "numba.core import compiler stencil_ir = compiler.run_frontend(stencil_func) ir_utils.remove_dels(stencil_ir.blocks) # rename all", "sigret): # look in the type cache to find if", "argtys[0].ndim, argtys[0].layout) return (real_ret, typemap, calltypes) def _install_type(self, typingctx): \"\"\"Constructs", "function name, the parameters to the stencil kernel, loop #", "hi = \"{}[{}][1]\".format(neighborhood_name, i) ranges.append((lo, hi)) # If there are", "calltypes) def _install_type(self, typingctx): \"\"\"Constructs and installs a typing class", "sa_func, loc) 
new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]],", "This function definition includes a # unique stencil function name,", "in range(offset): func_text += \" \" # ranges[i][0] is the", "array. if len(relatively_indexed) > 1: func_text += \" raise_if_incompatible_array_sizes(\" +", "hex(id(the_array)).replace(\"-\", \"_\"), self.id) # We will put a loop nest", "input array. if stmt.value.op == 'getitem': stmt_index_var = stmt.value.index else:", "of the relatively indexed # arrays. kernel_size, relatively_indexed = self.add_indices_to_kernel(", "func_text += \",\" + other_array func_text += \")\\n\" # Get", "type cache first if argtys_extra in self._type_cache: (_sig, _, _,", "index value with a call to # slice_addition. if isinstance(stmt_index_var_typ,", "the current absolute location in index0. index_var = ir.Var(scope, index_names[0],", "containing statements after the sentinel. for ret_block in ret_blocks: stencil_ir.blocks[ret_block].append(", "IR # after label and variable renaming of the stencil", "# body in it. ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) new_func = compiler.compile_ir( self._typingctx, self._targetctx,", "could be either int # or variable. In the latter", "\"\"\" if (self.neighborhood is not None and len(self.neighborhood) != argtys[0].ndim):", "to \" \"relatively indexed arrays.\") for index in kernel_consts: if", "the index variables into # ir.Var's. 
var_index_vars = [] for", "np.zeros({}, dtype=np.{})\\n\".format( out_name, shape_name, return_type_name) func_text += \" \" +", "= \"def {}({}{}):\\n\".format(stencil_func_name, \",\".join(kernel_copy.arg_names), sig_extra) # Get loop ranges for", "s_index_var, loc)) rvar = ir.Var(scope, out_name, loc) # Write the", "kernel_copy.arg_names + index_vars) for name, var in var_table.items(): if not", "s_index_name, loc) # Build a tuple from the index ir.Var's.", "= pysig # Get the IR for the newly created", "# and if the original statement is in the original", "in range(ndim)] if len(kernel_consts) == 0: raise NumbaValueError(\"Stencil kernel with", "import register_jitable from numba.core.errors import NumbaValueError from numba.misc.special import literal_unroll", "is not constant,\" \"'neighborhood' option required\") index_len = len(index) elif", "block of # the parfor body. prev_block.append(ir.Jump(body_first_label, loc)) # Add", "passed (_, result, typemap, calltypes) = self._type_cache[argtys] new_func = self._stencil_wrapper(result,", "function with the replaced loop # body in it. ir_utils.fixup_var_define_in_scope(stencil_ir.blocks)", "for addition of the offset. ret_blocks = [x + stencil_stub_last_label", "sig_extra += \", neighborhood=None\" # look in the type cache", "containing a function definition for the stencil function # that", "stencil_ir, new_stencil_param_types, None, compiler.DEFAULT_FLAGS, {}) return new_func def __call__(self, *args,", "StencilFunc object in the input typing context. \"\"\" _ty_cls =", "is None: return new_func.entry_point(*args) else: return new_func.entry_point(*(args+(result,))) def stencil(func_or_mode='constant', **options):", "name_var_table, sentinel_name) the_array = args[0] if config.DEBUG_ARRAY_OPT >= 1: print(\"_stencil_wrapper\",", "that # particular point in the iteration space. ret_blocks =", "array then avoid the tuple construction. 
if len(index_vars) == 1:", "jump from all the blocks that previously contained # a", "into # ir.Var's. var_index_vars = [] for one_var in index_vars:", "match stencil return type.\" raise NumbaValueError(msg) out_init =\"{} = np.full({},", "= new_body if need_to_calc_kernel: # Find the size of the", "_type_me(self, argtys, kwtys): \"\"\" Implement AbstractTemplate.generic() for the typing class", "# Shift labels in the kernel copy so they are", "= array_types if config.DEBUG_ARRAY_OPT >= 1: print(\"__call__\", array_types, args, kwargs)", "issue #7286 if np.isnan(cval): return \"np.nan\" elif np.isinf(cval): if cval", "use is precluded. # ranges[i][1] is the maximum of 0", "and inst.target.name == sentinel_name): # We found the sentinel assignment.", "in options: if option not in [\"cval\", \"standard_indexing\", \"neighborhood\"]: raise", "add_indices_to_kernel\") ir_utils.dump_blocks(kernel_copy.blocks) # The return in the stencil kernel becomes", "= ir.Var(scope, index_names[dim], loc) index_vars += [index_var] tmpname = ir_utils.mk_unique_var(\"ind_stencil_index\")", "in kernel_copy.blocks.items(): stencil_ir.blocks[l] = b stencil_ir.blocks[new_label] = block stencil_ir.blocks[label] =", "None array_types = tuple([typing.typeof.typeof(x) for x in args]) array_types_full =", "Start to form the new function to execute the stencil", "mode, options): self.id = type(self).id_counter type(self).id_counter += 1 self.kernel_ir =", "won't effect other callsites. (kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes( self.kernel_ir, calltypes)", "typemap, calltypes) = self.get_return_type(array_types) new_func = self._stencil_wrapper(result, None, real_ret, typemap,", "the const dictionary. 
if need_to_calc_kernel: assert hasattr(stmt_index_var, 'name') if stmt_index_var.name", "self._stencil_wrapper(result, sigret, return_type, typemap, calltypes, *argtys) return new_func def _type_me(self,", "# Get loop ranges for each dimension, which could be", "numba.core import compiler kernel_ir = compiler.run_frontend(func) return StencilFunc(kernel_ir, mode, options)", "+= [index_var] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc)", "+= \")\\n\" # Get the shape of the first input", "the array from being used. func_text += (\"for {} in", "compiler.DEFAULT_FLAGS, {}) return new_func def __call__(self, *args, **kwargs): if (self.neighborhood", "arrays are of different size than the first input array.", "calltypes): \"\"\" Transforms the stencil kernel as specified by the", "in ir.blocks.items(): new_block = copy.deepcopy(ir.blocks[block_label]) new_block.body = [] # For", "block.scope # split block across __sentinel__ # A new block", "mode style if not isinstance(func_or_mode, str): mode = 'constant' #", "typemap, copy_calltypes) if \"out\" in name_var_table: raise NumbaValueError(\"Cannot use the", "kernel with no accesses to \" \"relatively indexed arrays.\") for", "list of the relatively indexed # arrays. 
kernel_size, relatively_indexed =", "accesses to \" \"relatively indexed arrays.\") for index in kernel_consts:", "new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else:", "for name, var in var_table.items(): if not name in reserved_names:", "ir_utils.remove_dels(stencil_ir.blocks) # rename all variables in stencil_ir afresh var_table =", "(the out argument was not used) # then us numpy.full", "config, ir_utils, registry from numba.core.typing.templates import (CallableTemplate, signature, infer_global, AbstractTemplate)", "const_dict[te.name] if isinstance(te, int): neighborhood[i][0] = min(neighborhood[i][0], te) neighborhood[i][1] =", "setitem for that # particular point in the iteration space.", "a special sentinel # assignment. # 3) Get the IR", "dimension in the input array. Here we create the name", "0 and the minimum index found in the kernel #", "\"{}[:] = {}\\n\".format(out_name, cval_as_str(cval)) func_text += \" \" + out_init", "The return in the stencil kernel becomes a setitem for", "ir_utils.get_unused_var_name(\"__sentinel__\", name_var_table) if config.DEBUG_ARRAY_OPT >= 1: print(\"name_var_table\", name_var_table, sentinel_name) the_array", "kernel_size[i][0] hi = kernel_size[i][1] else: lo = \"{}[{}][0]\".format(neighborhood_name, i) hi", "else: mode = func_or_mode func = None for option in", "self.kws = [] # remember original kws arguments # stencils", "allocated for the statements prior to the # sentinel but", "failing signature without literal_unroll might be # (float[:], float[:], bool[:])", "i'th dimension # but minimum's greater than 0 don't preclude", "decorated(func): from numba.core import compiler kernel_ir = compiler.run_frontend(func) return StencilFunc(kernel_ir,", "in kernel.arg_names)): raise ValueError(\"Assignments to arrays passed to stencil \"", "# a function that will raise an error if any", "@register_jitable def 
raise_if_incompatible_array_sizes(a, *args): ashape = a.shape # We need", "as not to try to compute elements where # elements", "= \"{}[{}][1]\".format(neighborhood_name, i) ranges.append((lo, hi)) # If there are more", "If the array is indexed with a slice then we", "Find the size of the kernel by finding the maximum", "sig_extra += \", out=None\" result = kwtys['out'] if 'neighborhood' in", "For each block... for (block_label, block) in ir.blocks.items(): new_block =", "call to # a function that will raise an error", "\", {}=None\".format(out_name) if \"neighborhood\" in dict(self.kws): sig_extra += \", {}=None\".format(neighborhood_name)", "index) neighborhood[0][1] = max(neighborhood[0][1], index) index_len = 1 else: raise", "options self.kws = [] # remember original kws arguments #", "ir.Var(scope, index_names[dim], loc) index_vars += [index_var] tmpname = ir_utils.mk_unique_var(\"ind_stencil_index\") tmpvar", "block # containing statements after the sentinel. for ret_block in", "return StencilFunc(kernel_ir, mode, options) return decorated @lower_builtin(stencil) def stencil_dummy_lower(context, builder,", "in the stencil outline for the sentinel. for label, block", "of the kernel by finding the maximum absolute value #", "copy propagation applied to the copied IR will change the", "stencil input.\") argshape = arg.shape for i in range(len(ashape)): if", "array does not have same number \" \" of dimensions", "ir_utils.dump_blocks(kernel_copy.blocks) # Search all the block in the stencil outline", "import operator from numba.np import numpy_support class StencilFuncLowerer(object): '''Callable class", "function's IR. 
for (l, b) in kernel_copy.blocks.items(): stencil_ir.blocks[l] = b", "ir.Jump(new_label, loc)) break else: continue break stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks) ir_utils.remove_dels(stencil_ir.blocks)", "__init__(self, sf): self.stencilFunc = sf def __call__(self, context, builder, sig,", "prev_block.body = block.body[:i] # The current block is used for", "def __call__(self, context, builder, sig, args): cres = self.stencilFunc.compile_for_argtys(sig.args, {},", "call-site signature. \"\"\" if (self.neighborhood is not None and len(self.neighborhood)", "new_block = copy.deepcopy(ir.blocks[block_label]) new_block.body = [] # For each statement", "Get the type of this particular part of the index", "1: print(\"get_return_type\", argtys) ir_utils.dump_blocks(self.kernel_ir.blocks) if not isinstance(argtys[0], types.npytypes.Array): raise NumbaValueError(\"The", "uses of the original IR invalid. \"\"\" copy_calltypes = {}", "name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks) ir_utils.apply_copy_propagate( kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes) if", "if isinstance(index, tuple) or isinstance(index, list): for i in range(len(index)):", "ir.Assign) and inst.target.name == sentinel_name): # We found the sentinel", "ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func = numba.njit(slice_addition) sa_func_typ = types.functions.Dispatcher(sa_func) typemap[sa_var.name] =", "kernel.arg_names)): raise ValueError(\"Assignments to arrays passed to stencil \" \\", "on that to get the positive offset in this dimension", "new label. body_first_label = min(kernel_copy.blocks.keys()) # The previous block jumps", "index_vars, out_name): \"\"\" Find return statements in the IR and", "max(neighborhood[0][1], index) index_len = 1 else: raise NumbaValueError( \"Non-tuple or", "to hold stencil information for the IR. 
\"\"\" id_counter =", "argument to a stencil kernel must \" \"be the primary", "use the extra neighborhood # argument to the function. ranges", "new_var_dict[name] = ir_utils.mk_unique_var(name) ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict) stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1", "# Copyright (c) 2017 Intel Corporation # SPDX-License-Identifier: BSD-2-Clause #", "copy_ir_with_calltypes(self, ir, calltypes): \"\"\" Create a copy of a given", "print(\"new_stencil_param_types\", new_stencil_param_types) ir_utils.dump_blocks(stencil_ir.blocks) # Compile the combined stencil function with", "if config.DEBUG_ARRAY_OPT >= 1: print(\"new_stencil_param_types\", new_stencil_param_types) ir_utils.dump_blocks(stencil_ir.blocks) # Compile the", "then use it if \"cval\" in self.options: cval = self.options[\"cval\"]", "one relatively indexed arrays, add a call to # a", "using the tuple index. si = ir.SetItem(rvar, s_index_var, stmt.value, loc)", "the index value with a call to # slice_addition. 
if", "numba.core.errors import NumbaValueError from numba.misc.special import literal_unroll import numba import", "= len(index) elif isinstance(index, int): neighborhood[0][0] = min(neighborhood[0][0], index) neighborhood[0][1]", "the stencil kernel, loop # nests across the dimensions of", "computed stencil kernel size so as not to try to", "if mode != 'constant': raise ValueError(\"Unsupported mode style \" +", "[const_dict[stmt_index_var.name]] else: raise NumbaValueError(\"stencil kernel index is not \" \"constant,", "to # a function that will raise an error if", "assert hasattr(stmt_index_var, 'name') if stmt_index_var.name in tuple_table: kernel_consts += [tuple_table[stmt_index_var.name]]", "if index_len != ndim: raise NumbaValueError( \"Stencil index does not", "\"\"\" ret_blocks = [] for label, block in blocks.items(): scope", "options): self.id = type(self).id_counter type(self).id_counter += 1 self.kernel_ir = kernel_ir", "= args new_stencil_param_types = list(array_types) if config.DEBUG_ARRAY_OPT >= 1: print(\"new_stencil_param_types\",", "tuple_call = ir.Expr.build_tuple(var_index_vars, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) rvar = ir.Var(scope,", "over in the loop below. Without literal_unroll, their # types", "statements original value into # the array using the tuple", "const_index_vars[dim], loc) new_body.append(ir.Assign(getitemcall, getitemvar, loc)) # Get the type of", "Converts cval to a string constant def cval_as_str(cval): if not", "typed_passes typemap, return_type, calltypes, _ = typed_passes.type_inference_stage( self._typingctx, self._targetctx, self.kernel_ir,", "stencil kernel, loop # nests across the dimensions of the", "for other_array in relatively_indexed: if other_array != first_arg: func_text +=", "not compatible # (e.g. values as float[:] and flags as", "to the calltypes copy. 
scopy = copy.deepcopy(stmt) new_block.body.append(scopy) if stmt", "Search all the block in the stencil outline for the", "and add the corresponding index variable # to them and", "\"use relative indexing, not standard indexing.\") if len(set(standard_indexed) - set(kernel_copy.arg_names))", "else: # result is present, if cval is set then", "the name for # the index variable for each dimension.", "AbstractTemplate.generic() for the typing class built by StencilFunc._install_type(). Return the", "the standard indexed array names. standard_indexed = self.options.get(\"standard_indexing\", []) if", "= block.body[:i] # The current block is used for statements", "[] # For each statement in each block... for stmt", "guaranteed unique # and don't conflict with any labels in", "Add a jump from all the blocks that previously contained", "stencil_func = eval(stencil_func_name) if sigret is not None: pysig =", "j in range(offset): func_text += \" \" # Put a", "as lir from numba.core import types, typing, utils, ir, config,", "by the kernel into the result array. Returns the block", "elements outside the bounds of the input array would be", "not used) # then us numpy.full if the user specified", "{} # For each block... for (block_label, block) in ir.blocks.items():", "def stencil_dummy_lower(context, builder, sig, args): \"lowering for dummy stencil calls\"", "# types have to match. # An example failing signature", "in the input typing context. \"\"\" _ty_cls = type('StencilFuncTyping_' +", "When more than three total arrays are given, the second", "(potentially -0). 
Then, we do # unary - on that", "1) Construct a string containing a function definition for the", "{} in range(-min(0,{}),\" \"{}[{}]-max(0,{})):\\n\").format( index_vars[i], ranges[i][0], shape_name, i, ranges[i][1]) offset", "loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else: index_vars =", "typed_passes.type_inference_stage( self._typingctx, self._targetctx, self.kernel_ir, argtys, None, {}) if isinstance(return_type, types.npytypes.Array):", "\"returned\" by the kernel into the result array. Returns the", "required\") if ndim == 1: # Single dimension always has", "NumbaValueError( \"stencil kernel index is not constant,\" \"'neighborhood' option required\")", "statement to the calltypes copy. scopy = copy.deepcopy(stmt) new_block.body.append(scopy) if", "stencil kernels.\") sentinel_name = ir_utils.get_unused_var_name(\"__sentinel__\", name_var_table) if config.DEBUG_ARRAY_OPT >= 1:", "= self._stencil_wrapper(result, None, real_ret, typemap, calltypes, *array_types_full) if result is", "previously contained # a return in the stencil kernel to", "the stencil kernel definition.\") # Add index variables to getitems", "in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap) name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks) ir_utils.apply_copy_propagate( kernel_copy.blocks,", "replace it with the IR for the # stencil kernel", "\" \"stencil input.\") def slice_addition(the_slice, addend): \"\"\" Called by stencil", "must \" \"use relative indexing, not standard indexing.\") if len(set(standard_indexed)", "is not None and len(self.neighborhood) != args[0].ndim): raise ValueError(\"{} dimensional", "each dimension's index variable as part of the getitem calls.", "shape_name = ir_utils.get_unused_var_name(\"full_shape\", name_var_table) func_text += \" {} = {}.shape\\n\".format(shape_name,", "of the original IR invalid. 
\"\"\" copy_calltypes = {} kernel_copy", "in_cps, name_var_table, typemap, copy_calltypes) if \"out\" in name_var_table: raise NumbaValueError(\"Cannot", "use the reserved word 'out' in stencil kernels.\") sentinel_name =", "set() for block in kernel.blocks.values(): scope = block.scope loc =", "to # slice_addition. if isinstance(stmt_index_var_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"),", "for _ in range(ndim)] if len(kernel_consts) == 0: raise NumbaValueError(\"Stencil", "True else: need_to_calc_kernel = False if len(neighborhood) != ndim: raise", "in it. ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) new_func = compiler.compile_ir( self._typingctx, self._targetctx, stencil_ir, new_stencil_param_types,", "sf def __call__(self, context, builder, sig, args): cres = self.stencilFunc.compile_for_argtys(sig.args,", "= True else: need_to_calc_kernel = False if len(neighborhood) != ndim:", "maximum absolute value # index used in the kernel specification.", "len(kernel_consts) == 0: raise NumbaValueError(\"Stencil kernel with no accesses to", "out=None\" result = kwtys['out'] if 'neighborhood' in kwtys: argtys_extra +=", "for x in ret_blocks] if config.DEBUG_ARRAY_OPT >= 1: print(\"ret_blocks w/", "argtys, kwtys): \"\"\" Implement AbstractTemplate.generic() for the typing class built", "avoid the tuple construction. if len(index_vars) == 1: rvar =", "not self._typingctx.can_convert(cval_ty, return_type.dtype): msg = \"cval type does not match", "output array (the out argument was not used) # then", "= kernel_size[i][1] else: lo = \"{}[{}][0]\".format(neighborhood_name, i) hi = \"{}[{}][1]\".format(neighborhood_name,", "if the user specified a cval stencil decorator option #", "*array_types_full) if result is None: return new_func.entry_point(*args) else: return new_func.entry_point(*(args+(result,)))", "# and this will be a negative number (potentially -0).", "dimension, which could be either int # or variable. 
In", "a loop nest in the generated function for each #", "min(neighborhood[i][0], te) neighborhood[i][1] = max(neighborhood[i][1], te) else: raise NumbaValueError( \"stencil", "= eval(\"__numba_dummy_stencil\") sig = sig.replace(pysig=utils.pysignature(dummy_func)) self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)]) self._type_cache[argtys_extra] =", "\" \"for %d dimensional input array\" % (len(self.neighborhood), argtys[0].ndim)) argtys_extra", "cval_as_str(cval): if not np.isfinite(cval): # See if this is a", "types.npytypes.Array(return_type, argtys[0].ndim, argtys[0].layout) return (real_ret, typemap, calltypes) def _install_type(self, typingctx):", "= ir_utils.rename_labels(stencil_ir.blocks) ir_utils.remove_dels(stencil_ir.blocks) assert(isinstance(the_array, types.Type)) array_types = args new_stencil_param_types =", "registry.cpu_target.target_context self._typingctx.refresh() self._targetctx.refresh() self._install_type(self._typingctx) self.neighborhood = self.options.get(\"neighborhood\") self._type_cache = {}", ">= 1: print(\"After replace_return_with_setitem\", ret_blocks) ir_utils.dump_blocks(kernel_copy.blocks) # Start to form", "the blocks that previously contained # a return in the", "blocks, index_vars, out_name): \"\"\" Find return statements in the IR", "= types.npytypes.Array(return_type, argtys[0].ndim, argtys[0].layout) return (real_ret, typemap, calltypes) def _install_type(self,", "can locate it in the IR. We will # remove", "typing.typeof.typeof(cval) if not self._typingctx.can_convert(cval_ty, return_type.dtype): msg = \"cval type does", "preclude any entry in the array from being used. func_text", "=\"{} = np.full({}, {}, dtype=np.{})\\n\".format( out_name, shape_name, cval_as_str(cval), return_type_name) else:", "# ir.Var's. 
var_index_vars = [] for one_var in index_vars: index_var", "[] # remember original kws arguments # stencils only supported", "inst.loc scope = block.scope # split block across __sentinel__ #", "sig = sig.replace(pysig=utils.pysignature(dummy_func)) self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)]) self._type_cache[argtys_extra] = (sig, result,", ">= 1: print(\"_stencil_wrapper\", return_type, return_type.dtype, type(return_type.dtype), args) ir_utils.dump_blocks(kernel_copy.blocks) # We", "for an array name \" \"not present in the stencil", "to a user-specified slice. \"\"\" return slice(the_slice.start + addend, the_slice.stop", "i in range(the_array.ndim): index_var_name = ir_utils.get_unused_var_name(\"index\" + str(i), name_var_table) index_vars", "a SetItem call of the value \"returned\" by the kernel", "literal_unroll might be # (float[:], float[:], bool[:]) (Just (float[:], bool[:])", "relatively_indexed.add(stmt.value.value.name) # Store the index used after looking up the", "sigret, return_type, typemap, calltypes, *args): # Overall approach: # 1)", "in the kernel # and this will be a negative", "typemap, calltypes, *array_types_full) if result is None: return new_func.entry_point(*args) else:", "dict(self.kws): sig_extra += \", {}=None\".format(neighborhood_name) # Get a list of", "stmt_index_var.name in tuple_table: kernel_consts += [tuple_table[stmt_index_var.name]] elif stmt_index_var.name in const_dict:", "globals(), locals() stencil_func = eval(stencil_func_name) if sigret is not None:", "if a.ndim != arg.ndim: raise ValueError(\"Secondary stencil array does not", "nests across the dimensions of the input array. Those loop", "string-repr numerical const, issue #7286 if np.isnan(cval): return \"np.nan\" elif", "ValueError(\"Secondary stencil array has some dimension \" \"smaller the same", "consts for use later. 
const_dict[stmt.target.name] = stmt.value.value if ((isinstance(stmt, ir.Assign)", "config.DEBUG_ARRAY_OPT >= 1: print(\"new stencil func text\") print(func_text) # Force", "and the minimum index found in the kernel # and", "\" \"constant, 'neighborhood' option required\") if ndim == 1: #", "does not match stencil return type.\" raise NumbaValueError(msg) out_init =", "else: # Convert the string names of the index variables", ">= 1: print(\"remembering in const_dict\", stmt.target.name, stmt.value.value) # Remember consts", "kwargs['out'] rdtype = result.dtype rttype = numpy_support.from_dtype(rdtype) result_type = types.npytypes.Array(rttype,", "= ir.Var(scope, getitemname, loc) getitemcall = ir.Expr.getitem(stmt_index_var, const_index_vars[dim], loc) new_body.append(ir.Assign(getitemcall,", "= ir.Var(scope, out_name, loc) ivar = ir.Var(scope, index_vars[0], loc) new_body.append(ir.SetItem(rvar,", "= self.replace_return_with_setitem(kernel_copy.blocks, index_vars, out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"After replace_return_with_setitem\",", "variable in # the const dictionary. if need_to_calc_kernel: assert hasattr(stmt_index_var,", "return {}\\n\".format(out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"new stencil func text\")", "tuple from the index ir.Var's. tuple_call = ir.Expr.build_tuple(var_index_vars, loc) new_body.append(ir.Assign(tuple_call,", "have to match. # An example failing signature without literal_unroll", "ir.Var(scope, index_names[0], loc) tmpname = ir_utils.mk_unique_var(\"stencil_index\") tmpvar = ir.Var(scope, tmpname,", "of the calltypes because copy propagation applied to the copied", "index ir.Var's. 
tuple_call = ir.Expr.build_tuple(var_index_vars, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) rvar", "rvar = ir.Var(scope, out_name, loc) # Write the return statements", "kernel must \" \"be the primary input array.\") from numba.core", "{}) return new_func def __call__(self, *args, **kwargs): if (self.neighborhood is", "# 2) The but of the loop nest in this", "*argtys) return new_func def _type_me(self, argtys, kwtys): \"\"\" Implement AbstractTemplate.generic()", "inst in enumerate(block.body): if (isinstance( inst, ir.Assign) and inst.target.name ==", "stmt.value, loc) new_body.append(si) else: new_body.append(stmt) block.body = new_body return ret_blocks", "if len(neighborhood) != ndim: raise ValueError(\"%d dimensional neighborhood specified for", "if neighborhood is None: need_to_calc_kernel = True else: need_to_calc_kernel =", "\"_\"), self.id) # We will put a loop nest in", "offset in this dimension whose # use is precluded. #", "\"-np.inf\" else: return \"np.inf\" else: return str(cval) # If we", "body of a loop, for which args aren't needed. ir_utils.remove_args(kernel_copy.blocks)", "isinstance(stmt.value, ir.Const)): if config.DEBUG_ARRAY_OPT >= 1: print(\"remembering in const_dict\", stmt.target.name,", "loop ranges for each dimension, which could be either int", "for j in range(offset): func_text += \" \" # Put", "new_var_dict = {} reserved_names = ([sentinel_name, out_name, neighborhood_name, shape_name] +", "if this is a string-repr numerical const, issue #7286 if", "stencil information for the IR. \"\"\" id_counter = 0 def", "statements after sentinel. 
block.body = block.body[i + 1:] # But", "{}) if isinstance(return_type, types.npytypes.Array): raise NumbaValueError( \"Stencil kernel must return", "string containing a function definition for the stencil function #", "{}({}{}):\\n\".format(stencil_func_name, \",\".join(kernel_copy.arg_names), sig_extra) # Get loop ranges for each dimension,", "from numba.core.imputils import lower_builtin from numba.core.extending import register_jitable from numba.core.errors", "res @register_jitable def raise_if_incompatible_array_sizes(a, *args): ashape = a.shape # We", "ir_utils.rename_labels(stencil_ir.blocks) ir_utils.remove_dels(stencil_ir.blocks) assert(isinstance(the_array, types.Type)) array_types = args new_stencil_param_types = list(array_types)", "= compiler.compile_ir( self._typingctx, self._targetctx, stencil_ir, new_stencil_param_types, None, compiler.DEFAULT_FLAGS, {}) return", "if not self._typingctx.can_convert(cval_ty, return_type.dtype): msg = \"cval type does not", "# computed stencil kernel size so as not to try", "(kwtys['out'],) sig_extra += \", out=None\" result = kwtys['out'] if 'neighborhood'", "find if result array is passed (_, result, typemap, calltypes)", "[] stmt_index_var_typ = typemap[stmt_index_var.name] # Same idea as above but", "neighborhood[i][1] = max(neighborhood[i][1], te) else: raise NumbaValueError( \"stencil kernel index", "the stencil kernel. This function definition includes a # unique", "= [] for i in range(the_array.ndim): if isinstance(kernel_size[i][0], int): lo", "kernel IR into the stencil function IR # after label", "[[0,0] for _ in range(ndim)] if len(kernel_consts) == 0: raise", "dictionary. if need_to_calc_kernel: assert hasattr(stmt_index_var, 'name') if stmt_index_var.name in tuple_table:", "as stencil index.\") if index_len != ndim: raise NumbaValueError( \"Stencil", "Single dimension always has index variable 'index0'. 
# tmpvar will", "result is None: return new_func.entry_point(*args) else: return new_func.entry_point(*(args+(result,))) def stencil(func_or_mode='constant',", "lower_builtin from numba.core.extending import register_jitable from numba.core.errors import NumbaValueError from", "kernel specification. neighborhood = [[0,0] for _ in range(ndim)] if", "neighborhood) ir_utils.dump_blocks(kernel.blocks) if neighborhood is None: need_to_calc_kernel = True else:", "= set() for block in kernel.blocks.values(): scope = block.scope loc", "elements out of the tuple indexing # expression and add", "replace sentinel stencil_ir\") ir_utils.dump_blocks(stencil_ir.blocks) print(\"before replace sentinel kernel_copy\") ir_utils.dump_blocks(kernel_copy.blocks) #", "# An example failing signature without literal_unroll might be #", "self._typingctx.refresh() self._targetctx.refresh() self._install_type(self._typingctx) self.neighborhood = self.options.get(\"neighborhood\") self._type_cache = {} self._lower_me", "+= \", {}=None\".format(out_name) if \"neighborhood\" in dict(self.kws): sig_extra += \",", "Get loop ranges for each dimension, which could be either", "= max(neighborhood[i][1], te) else: raise NumbaValueError( \"stencil kernel index is", "stencil kernel body. func_text += \"{} = 0\\n\".format(sentinel_name) func_text +=", "in the kernel from relative to regular Python indexing. Returns", "that contained return statements. 
\"\"\" ret_blocks = [] for label,", "does not match stencil return type.\" raise NumbaValueError(msg) out_init =\"{}", "sig_extra += \", {}=None\".format(out_name) if \"neighborhood\" in dict(self.kws): sig_extra +=", "registry.cpu_target.typing_context self._targetctx = registry.cpu_target.target_context self._typingctx.refresh() self._targetctx.refresh() self._install_type(self._typingctx) self.neighborhood = self.options.get(\"neighborhood\")", "operator from numba.np import numpy_support class StencilFuncLowerer(object): '''Callable class responsible", "latter case we'll use the extra neighborhood # argument to", "is not \" \"constant, 'neighborhood' option required\") if ndim ==", "= types.npytypes.Array(rttype, result.ndim, numpy_support.map_layout(result)) array_types = tuple([typing.typeof.typeof(x) for x in", "sa_func_typ = types.functions.Dispatcher(sa_func) typemap[sa_var.name] = sa_func_typ g_sa = ir.Global(\"slice_addition\", sa_func,", "in this dimension whose # use is precluded. # ranges[i][1]", "a numpy array.\") real_ret = types.npytypes.Array(return_type, argtys[0].ndim, argtys[0].layout) return (real_ret,", "acc_call = ir.Expr.binop(operator.add, getitemvar, index_vars[dim], loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) tuple_call", "lowering calls to a specific StencilFunc. 
''' def __init__(self, sf):", "min(neighborhood[0][0], index) neighborhood[0][1] = max(neighborhood[0][1], index) index_len = 1 else:", "context.add_linking_libs([cres.library]) return res @register_jitable def raise_if_incompatible_array_sizes(a, *args): ashape = a.shape", "[] for stmt in block.body: if isinstance(stmt, ir.Return): ret_blocks.append(label) #", "+ other_array func_text += \")\\n\" # Get the shape of", "first_arg = kernel_copy.arg_names[0] in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap) name_var_table =", "= registry.cpu_target.target_context self._typingctx.refresh() self._targetctx.refresh() self._install_type(self._typingctx) self.neighborhood = self.options.get(\"neighborhood\") self._type_cache =", "numba.core import typed_passes typemap, return_type, calltypes, _ = typed_passes.type_inference_stage( self._typingctx,", "hi)) # If there are more than one relatively indexed", "Add all the parfor loop body blocks to the gufunc", "loc)) break else: continue break stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks) ir_utils.remove_dels(stencil_ir.blocks) assert(isinstance(the_array,", "types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func = numba.njit(slice_addition) sa_func_typ", "kernel so that our changes for this callsite # won't", "if 'out' in kwtys: argtys_extra += (kwtys['out'],) sig_extra += \",", "so as not to try to compute elements where #", "of 0 and the minimum index found in the kernel", "stmt.target.name in kernel.arg_names)): raise ValueError(\"Assignments to arrays passed to stencil", "for i in range(len(index)): te = index[i] if isinstance(te, ir.Var)", "the output array (the out argument was not used) #", "llvmlite import ir as lir from numba.core import types, typing,", "includes a # unique stencil function name, the parameters to", "See if this is a string-repr numerical const, issue #7286", "kernel_consts: if isinstance(index, tuple) or 
isinstance(index, list): for i in", "\"\"\" Transforms the stencil kernel as specified by the user", "new_body if need_to_calc_kernel: # Find the size of the kernel", "# Find the size of the kernel by finding the", "will put a loop nest in the generated function for", "tmpname, loc) ind_stencils += [tmpvar] getitemname = ir_utils.mk_unique_var(\"getitem\") getitemvar =", "\"\" if result is not None: sig_extra += \", {}=None\".format(out_name)", "index is not constant,\" \"'neighborhood' option required\") index_len = len(index)", "pysig = utils.pysignature(stencil_func) sigret.pysig = pysig # Get the IR", "slice then we # have to add the index value", "Create extra signature for out and neighborhood. out_name = ir_utils.get_unused_var_name(\"out\",", "the loop index to a user-specified slice. \"\"\" return slice(the_slice.start", "original value into # the array using the tuple index.", "in the kernel specification. neighborhood = [[0,0] for _ in", "array_types = args new_stencil_param_types = list(array_types) if config.DEBUG_ARRAY_OPT >= 1:", "in self.options: cval = self.options[\"cval\"] cval_ty = typing.typeof.typeof(cval) if not", "the stencil kernel as specified by the user into one", "NumbaValueError(\"%d dimensional neighborhood specified \" \"for %d dimensional input array\"", "the # computed size of the stencil kernel and a", "calltypes) # The stencil kernel body becomes the body of", "standard_indexed = self.options.get(\"standard_indexing\", []) if first_arg in standard_indexed: raise NumbaValueError(\"The", "is set then use it if \"cval\" in self.options: cval", "self.neighborhood = kernel_size if config.DEBUG_ARRAY_OPT >= 1: print(\"After add_indices_to_kernel\") ir_utils.dump_blocks(kernel_copy.blocks)", "replace_return_with_setitem(self, blocks, index_vars, out_name): \"\"\" Find return statements in the", "loop nests use the # computed stencil kernel size so", "neighborhood specified for {} \" \"dimensional input array\".format( 
len(self.neighborhood), args[0].ndim))", "word 'out' in stencil kernels.\") sentinel_name = ir_utils.get_unused_var_name(\"__sentinel__\", name_var_table) if", "return_type.dtype != typing.typeof.typeof(cval): msg = \"cval type does not match", "\")\\n\" # Get the shape of the first input array.", "new_body = [] for stmt in block.body: if (isinstance(stmt, ir.Assign)", "ir.blocks[block_label].body: # Copy the statement to the new copy of", "variable. In the latter case we'll use the extra neighborhood", "string constant def cval_as_str(cval): if not np.isfinite(cval): # See if", "sigret, return_type, typemap, calltypes, *argtys) return new_func def _type_me(self, argtys,", "numba.njit(slice_addition) sa_func_typ = types.functions.Dispatcher(sa_func) typemap[sa_var.name] = sa_func_typ g_sa = ir.Global(\"slice_addition\",", "new copy of the kernel # and if the original", "so we can locate it in the IR. We will", "numpy_support class StencilFuncLowerer(object): '''Callable class responsible for lowering calls to", "class responsible for lowering calls to a specific StencilFunc. '''", "block... for (block_label, block) in ir.blocks.items(): new_block = copy.deepcopy(ir.blocks[block_label]) new_block.body", "return type.\" raise NumbaValueError(msg) out_init = \"{}[:] = {}\\n\".format(out_name, cval_as_str(cval))", "to transition the accesses # in the kernel from relative", "some dimension \" \"smaller the same dimension in the first", "stencil return type.\" raise NumbaValueError(msg) out_init = \"{}[:] = {}\\n\".format(out_name,", "loc)) else: # Convert the string names of the index", "list of the standard indexed array names. 
standard_indexed = self.options.get(\"standard_indexing\",", "res = context.call_internal(builder, cres.fndesc, sig, args) context.add_linking_libs([cres.library]) return res @register_jitable", "tuple([typing.typeof.typeof(x) for x in args]) array_types_full = array_types if config.DEBUG_ARRAY_OPT", "dimension \" \"smaller the same dimension in the first \"", "the # computed stencil kernel size so as not to", "= ir.Var(scope, out_name, loc) # Write the return statements original", "ret_blocks] if config.DEBUG_ARRAY_OPT >= 1: print(\"ret_blocks w/ offsets\", ret_blocks, stencil_stub_last_label)", "which args aren't needed. ir_utils.remove_args(kernel_copy.blocks) first_arg = kernel_copy.arg_names[0] in_cps, out_cps", "block is used for statements after sentinel. block.body = block.body[i", "Get the shape of the first input array. shape_name =", "decorator option # or np.zeros if they didn't to allocate", "block.body = new_body return ret_blocks def add_indices_to_kernel(self, kernel, index_names, ndim,", "stmt in block.body: if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Const)): if", "particular part of the index tuple. if isinstance(stmt_index_var_typ, types.ConstSized): one_index_typ", "= self.options.get(\"neighborhood\") self._type_cache = {} self._lower_me = StencilFuncLowerer(self) def replace_return_with_setitem(self,", "types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) else: acc_call = ir.Expr.binop(operator.add, getitemvar,", "stencil function IR + stencil kernel IR into existence. #", "of the index variables into # ir.Var's. 
var_index_vars = []", "return_type, typemap, calltypes, *argtys) return new_func def _type_me(self, argtys, kwtys):", "calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) new_body.append(ir.Assign(", "= type('StencilFuncTyping_' + str(self.id), (AbstractTemplate,), dict(key=self, generic=self._type_me)) typingctx.insert_user_function(self, _ty_cls) def", "neighborhood[i][0] = min(neighborhood[i][0], te) neighborhood[i][1] = max(neighborhood[i][1], te) else: raise", "loc) new_body.append(ir.Assign(getitemcall, getitemvar, loc)) # Get the type of this", "user-specified slice. \"\"\" return slice(the_slice.start + addend, the_slice.stop + addend)", "text\") print(func_text) # Force the new stencil function into existence.", "to stencil \" \\ \"kernels is not allowed.\") if (isinstance(stmt,", "but of the loop nest in this new function is", "= arg.shape for i in range(len(ashape)): if ashape[i] > argshape[i]:", "stmt.target.name, stmt.value.value) # Remember consts for use later. const_dict[stmt.target.name] =", "# elements outside the bounds of the input array would", "raise NumbaValueError(\"stencil kernel index is not \" \"constant, 'neighborhood' option", "and remove the sentinel # assignment. 
Insert the stencil kernel", "max(stencil_ir.blocks.keys()) + 1 # Shift labels in the kernel copy", "self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)]) self._type_cache[argtys_extra] = (sig, result, typemap, calltypes) return", "result is None: return_type_name = numpy_support.as_dtype( return_type.dtype).type.__name__ if \"cval\" in", "if config.DEBUG_ARRAY_OPT >= 1: print(\"ret_blocks w/ offsets\", ret_blocks, stencil_stub_last_label) print(\"before", "getitems in the IR to transition the accesses # in", "+= \", neighborhood=None\" # look in the type cache first", "if not name in reserved_names: new_var_dict[name] = ir_utils.mk_unique_var(name) ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)", "out_name, shape_name, cval_as_str(cval), return_type_name) else: out_init =\"{} = np.zeros({}, dtype=np.{})\\n\".format(", "Same idea as above but you have to extract #", "type.\" raise NumbaValueError(msg) out_init = \"{}[:] = {}\\n\".format(out_name, cval_as_str(cval)) func_text", "typemap, calltypes) = self._type_cache[argtys] new_func = self._stencil_wrapper(result, sigret, return_type, typemap,", "([sentinel_name, out_name, neighborhood_name, shape_name] + kernel_copy.arg_names + index_vars) for name,", "minimum index used in the i'th dimension # but minimum's", "= type(self).id_counter type(self).id_counter += 1 self.kernel_ir = kernel_ir self.mode =", "(), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar,", "%d \" \\ \"dimensional input array\" % (len(neighborhood), ndim)) tuple_table", "# the const dictionary. 
if need_to_calc_kernel: assert hasattr(stmt_index_var, 'name') if", "loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))", "add_indices_to_kernel.\") relatively_indexed.add(stmt.value.value.name) # Store the index used after looking up", "func_text += \" raise_if_incompatible_array_sizes(\" + first_arg for other_array in relatively_indexed:", "print(\"remembering in const_dict\", stmt.target.name, stmt.value.value) # Remember consts for use", "the loop nest in this new function is a special", "np.isinf(cval): if cval < 0: return \"-np.inf\" else: return \"np.inf\"", "addend): \"\"\" Called by stencil in Python mode to add", "class StencilFunc(object): \"\"\" A special type to hold stencil information", "\\ \"kernels is not allowed.\") if (isinstance(stmt, ir.Assign) and isinstance(stmt.value,", "cause us to # preclude any entry in the array", "argshape[i]: raise ValueError(\"Secondary stencil array has some dimension \" \"smaller", "new_stencil_param_types) ir_utils.dump_blocks(stencil_ir.blocks) # Compile the combined stencil function with the", "ir.Var) and te.name in const_dict: te = const_dict[te.name] if isinstance(te,", "# slice_addition. if isinstance(one_index_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc)", "= (\"def __numba_dummy_stencil({}{}):\\n pass\\n\".format( \",\".join(self.kernel_ir.arg_names), sig_extra)) exec(dummy_text) in globals(), locals()", "the index variable for each dimension. index0, index1, ... index_vars", "label, block in blocks.items(): scope = block.scope loc = block.loc", "call to # slice_addition. if isinstance(one_index_typ, types.misc.SliceType): sa_var = ir.Var(scope,", "Without literal_unroll, their # types have to match. 
# An", "calltypes, *args): # Overall approach: # 1) Construct a string", "= index[i] if isinstance(te, ir.Var) and te.name in const_dict: te", "this new function. # 4) Split the block containing the", "range(the_array.ndim): for j in range(offset): func_text += \" \" #", "kwtys['out'] if 'neighborhood' in kwtys: argtys_extra += (kwtys['neighborhood'],) sig_extra +=", "associated with this # statement to the calltypes copy. scopy", "parameters to the stencil kernel, loop # nests across the", "= \"\" if result is not None: sig_extra += \",", "index in kernel_consts: if isinstance(index, tuple) or isinstance(index, list): for", "x in args] + [result_type]) else: result = None array_types", "continue break stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks) ir_utils.remove_dels(stencil_ir.blocks) assert(isinstance(the_array, types.Type)) array_types =", "ir_utils.get_unused_var_name(\"out\", name_var_table) neighborhood_name = ir_utils.get_unused_var_name(\"neighborhood\", name_var_table) sig_extra = \"\" if", "raise ValueError(\"Secondary stencil array does not have same number \"", "prev_block.append(ir.Jump(body_first_label, loc)) # Add all the parfor loop body blocks", "index does not match array dimensionality.\") return (neighborhood, relatively_indexed) def", "out argument was not used) # then us numpy.full if", "if ndim == 1: # Single dimension always has index", "information. We need a copy of the calltypes because copy", "and make subsequent uses of the original IR invalid. \"\"\"", "of the relatively indexed # arrays are of different size", "index_names[dim], loc) index_vars += [index_var] tmpname = ir_utils.mk_unique_var(\"ind_stencil_index\") tmpvar =", "relatively_indexed) def get_return_type(self, argtys): if config.DEBUG_ARRAY_OPT >= 1: print(\"get_return_type\", argtys)", "is a special sentinel # assignment. 
# 3) Get the", "dummy_text = (\"def __numba_dummy_stencil({}{}):\\n pass\\n\".format( \",\".join(self.kernel_ir.arg_names), sig_extra)) exec(dummy_text) in globals(),", "args[0].ndim): raise ValueError(\"{} dimensional neighborhood specified for {} \" \"dimensional", "stencil function. from numba.core import compiler stencil_ir = compiler.run_frontend(stencil_func) ir_utils.remove_dels(stencil_ir.blocks)", "does not match array dimensionality.\") return (neighborhood, relatively_indexed) def get_return_type(self,", "\"neighborhood\"]: raise ValueError(\"Unknown stencil option \" + option) wrapper =", "pysig # Get the IR for the newly created stencil", "cache to find if result array is passed (_, result,", "a new label. body_first_label = min(kernel_copy.blocks.keys()) # The previous block", "hold stencil information for the IR. \"\"\" id_counter = 0", "= ir.Var(scope, one_var, loc) var_index_vars += [index_var] s_index_name = ir_utils.mk_unique_var(\"stencil_index\")", "for use later. const_dict[stmt.target.name] = stmt.value.value if ((isinstance(stmt, ir.Assign) and", "# that will execute the stencil kernel. This function definition", "used in the kernel specification. neighborhood = [[0,0] for _", "+= \"{} = 0\\n\".format(sentinel_name) func_text += \" return {}\\n\".format(out_name) if", "Called by stencil in Python mode to add the loop", "raise NumbaValueError(\"%d dimensional neighborhood specified \" \"for %d dimensional input", "kernel_size if config.DEBUG_ARRAY_OPT >= 1: print(\"After add_indices_to_kernel\") ir_utils.dump_blocks(kernel_copy.blocks) # The", "Compile the combined stencil function with the replaced loop #", "out_init =\"{} = np.zeros({}, dtype=np.{})\\n\".format( out_name, shape_name, return_type_name) func_text +=", "context. 
\"\"\" _ty_cls = type('StencilFuncTyping_' + str(self.id), (AbstractTemplate,), dict(key=self, generic=self._type_me))", "builder, sig, args): \"lowering for dummy stencil calls\" return lir.Constant(lir.IntType(types.intp.bitwidth),", "loc) new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc)) else: # Convert the string", "if isinstance(stmt_index_var_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func =", "loc)) slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc) calltypes[slice_addition_call] =", "input array. Those loop nests use the # computed stencil", "arrays passed to stencil \" \\ \"kernels is not allowed.\")", "index tuple. if isinstance(stmt_index_var_typ, types.ConstSized): one_index_typ = stmt_index_var_typ[dim] else: one_index_typ", "in range(ndim): tmpname = ir_utils.mk_unique_var(\"const_index\") tmpvar = ir.Var(scope, tmpname, loc)", "array. # So, take the minimum of 0 and the", "calls to a specific StencilFunc. ''' def __init__(self, sf): self.stencilFunc", "# containing statements after the sentinel. for ret_block in ret_blocks:", "for stmt in ir.blocks[block_label].body: # Copy the statement to the", "copy_calltypes) = self.copy_ir_with_calltypes( self.kernel_ir, calltypes) # The stencil kernel body", "\"relatively indexed arrays.\") for index in kernel_consts: if isinstance(index, tuple)", "# We will put a loop nest in the generated", "afresh var_table = ir_utils.get_name_var_table(stencil_ir.blocks) new_var_dict = {} reserved_names = ([sentinel_name,", "all the blocks that previously contained # a return in", "typemap, calltypes): \"\"\" Transforms the stencil kernel as specified by", "= min(kernel_copy.blocks.keys()) # The previous block jumps to the minimum", "getitem calls. So, in effect array[-1] becomes array[index0-1]. \"\"\" const_dict", "array\".format( len(self.neighborhood), args[0].ndim)) if 'out' in kwargs: result = kwargs['out']", "offset. 
ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]", "\"constant, 'neighborhood' option required\") if ndim == 1: # Single", "loc) var_index_vars += [index_var] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope,", "len(index) elif isinstance(index, int): neighborhood[0][0] = min(neighborhood[0][0], index) neighborhood[0][1] =", "ret_blocks: stencil_ir.blocks[ret_block].append( ir.Jump(new_label, loc)) break else: continue break stencil_ir.blocks =", "utils, ir, config, ir_utils, registry from numba.core.typing.templates import (CallableTemplate, signature,", "the_array = args[0] if config.DEBUG_ARRAY_OPT >= 1: print(\"_stencil_wrapper\", return_type, return_type.dtype,", "relative to regular Python indexing. Returns the # computed size", "= self.stencilFunc.compile_for_argtys(sig.args, {}, sig.return_type, None) res = context.call_internal(builder, cres.fndesc, sig,", "in const_dict: kernel_consts += [const_dict[stmt_index_var.name]] else: raise NumbaValueError(\"stencil kernel index", "to regular Python indexing. Returns the # computed size of", "other_array in relatively_indexed: if other_array != first_arg: func_text += \",\"", "to add the loop index to a user-specified slice. \"\"\"", "else: out_init =\"{} = np.zeros({}, dtype=np.{})\\n\".format( out_name, shape_name, return_type_name) func_text", "replace sentinel kernel_copy\") ir_utils.dump_blocks(kernel_copy.blocks) # Search all the block in", "# or variable. 
In the latter case we'll use the", "not a numpy array.\") real_ret = types.npytypes.Array(return_type, argtys[0].ndim, argtys[0].layout) return", "ir_utils.add_offset_to_labels( kernel_copy.blocks, stencil_stub_last_label) new_label = max(kernel_copy.blocks.keys()) + 1 # Adjust", "new_func.entry_point(*args) else: return new_func.entry_point(*(args+(result,))) def stencil(func_or_mode='constant', **options): # called on", "1 for j in range(offset): func_text += \" \" #", "1: print(\"new stencil func text\") print(func_text) # Force the new", "containing the sentinel assignment and remove the sentinel # assignment.", "relatively_indexed = self.add_indices_to_kernel( kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed, typemap, copy_calltypes)", "= ir.Var(scope, tmpname, loc) stmt_index_var_typ = typemap[stmt_index_var.name] # If the", "= [] # remember original kws arguments # stencils only", "assignment and replace it with the IR for the #", "indexing. Returns the # computed size of the stencil kernel", "StencilFunc(object): \"\"\" A special type to hold stencil information for", "tmpvar, loc), stmt.target, loc)) else: index_vars = [] sum_results =", "ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp],", "= self.get_return_type(array_types) new_func = self._stencil_wrapper(result, None, real_ret, typemap, calltypes, *array_types_full)", "a loop, for which args aren't needed. ir_utils.remove_args(kernel_copy.blocks) first_arg =", "NumbaValueError( \"Non-tuple or non-integer used as stencil index.\") if index_len", "+= \" \" # ranges[i][0] is the minimum index used", "calls. So, in effect array[-1] becomes array[index0-1]. 
\"\"\" const_dict =", "loc) getitemcall = ir.Expr.getitem(stmt_index_var, const_index_vars[dim], loc) new_body.append(ir.Assign(getitemcall, getitemvar, loc)) #", "raise NumbaValueError(msg) out_init = \"{}[:] = {}\\n\".format(out_name, cval_as_str(cval)) func_text +=", "in const_dict: te = const_dict[te.name] if isinstance(te, int): neighborhood[i][0] =", "block jumps to the minimum labelled block of # the", "contained # a return in the stencil kernel to the", "Then, we do # unary - on that to get", "@lower_builtin(stencil) def stencil_dummy_lower(context, builder, sig, args): \"lowering for dummy stencil", "Copy the kernel so that our changes for this callsite", "sig_extra)) exec(dummy_text) in globals(), locals() dummy_func = eval(\"__numba_dummy_stencil\") sig =", "from numba.core.errors import NumbaValueError from numba.misc.special import literal_unroll import numba", "preclude any entry in the array. # So, take the", "mode style \" + mode) def decorated(func): from numba.core import", "# look in the type cache first if argtys_extra in", "loop # nests across the dimensions of the input array.", "+ first_arg for other_array in relatively_indexed: if other_array != first_arg:", "all the block in the stencil outline for the sentinel.", "and stmt.value.value.name in kernel.arg_names and stmt.value.value.name not in standard_indexed): #", "typing, utils, ir, config, ir_utils, registry from numba.core.typing.templates import (CallableTemplate,", "calltypes) = self.get_return_type(array_types) new_func = self._stencil_wrapper(result, None, real_ret, typemap, calltypes,", "loc) ind_stencils += [tmpvar] getitemname = ir_utils.mk_unique_var(\"getitem\") getitemvar = ir.Var(scope,", "of the tuple indexing # expression and add the corresponding", "stencil_func_name = \"__numba_stencil_%s_%s\" % ( hex(id(the_array)).replace(\"-\", \"_\"), self.id) # We", "register_jitable from numba.core.errors import NumbaValueError from numba.misc.special import literal_unroll import", 
"in the IR. We will # remove this sentinel assignment", "return res @register_jitable def raise_if_incompatible_array_sizes(a, *args): ashape = a.shape #", "config.DEBUG_ARRAY_OPT >= 1: print(\"ret_blocks w/ offsets\", ret_blocks, stencil_stub_last_label) print(\"before replace", "None for option in options: if option not in [\"cval\",", "= ir.SetItem(rvar, s_index_var, stmt.value, loc) new_body.append(si) else: new_body.append(stmt) block.body =", "function. from numba.core import compiler stencil_ir = compiler.run_frontend(stencil_func) ir_utils.remove_dels(stencil_ir.blocks) #", "const, issue #7286 if np.isnan(cval): return \"np.nan\" elif np.isinf(cval): if", "responsible for lowering calls to a specific StencilFunc. ''' def", "cval < 0: return \"-np.inf\" else: return \"np.inf\" else: return", "= np.zeros({}, dtype=np.{})\\n\".format( out_name, shape_name, return_type_name) func_text += \" \"", "but minimum's greater than 0 don't preclude any entry in", "the variable in # the const dictionary. if need_to_calc_kernel: assert", "config.DEBUG_ARRAY_OPT >= 1: print(\"new_stencil_param_types\", new_stencil_param_types) ir_utils.dump_blocks(stencil_ir.blocks) # Compile the combined", "ir.Var(scope, tmpname, loc) new_body.append(ir.Assign(ir.Const(dim, loc), tmpvar, loc)) const_index_vars += [tmpvar]", "[] for stmt in block.body: if (isinstance(stmt, ir.Assign) and isinstance(stmt.value,", "the kernel from relative to regular Python indexing. Returns the", "func_text += \" {} = {}.shape\\n\".format(shape_name, first_arg) # Converts cval", "index variable for each dimension. index0, index1, ... 
index_vars =", "static_getitem in add_indices_to_kernel.\") relatively_indexed.add(stmt.value.value.name) # Store the index used after", "input.\") argshape = arg.shape for i in range(len(ashape)): if ashape[i]", "sa_func, loc) new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var],", "ir.Expr.binop(operator.add, stmt_index_var, index_var, loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar,", "the latter case we'll use the extra neighborhood # argument", "utils.pysignature(stencil_func) sigret.pysig = pysig # Get the IR for the", "stencil_ir afresh var_table = ir_utils.get_name_var_table(stencil_ir.blocks) new_var_dict = {} reserved_names =", "argtys_extra += (kwtys['neighborhood'],) sig_extra += \", neighborhood=None\" # look in", "= {}.shape\\n\".format(shape_name, first_arg) # Converts cval to a string constant", "\"stencil kernel index is not constant,\" \"'neighborhood' option required\") index_len", "loc = block.loc new_body = [] for stmt in block.body:", "+= [index_var_name] # Create extra signature for out and neighborhood.", "if len(kernel_consts) == 0: raise NumbaValueError(\"Stencil kernel with no accesses", "break else: continue break stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks) ir_utils.remove_dels(stencil_ir.blocks) assert(isinstance(the_array, types.Type))", "Copyright (c) 2017 Intel Corporation # SPDX-License-Identifier: BSD-2-Clause # import", "exec(dummy_text) in globals(), locals() dummy_func = eval(\"__numba_dummy_stencil\") sig = sig.replace(pysig=utils.pysignature(dummy_func))", "return_type.dtype, type(return_type.dtype), args) ir_utils.dump_blocks(kernel_copy.blocks) # We generate a Numba function", "index to a user-specified slice. 
\"\"\" return slice(the_slice.start + addend,", ">= 1: print(\"new_stencil_param_types\", new_stencil_param_types) ir_utils.dump_blocks(stencil_ir.blocks) # Compile the combined stencil", "construction. if len(index_vars) == 1: rvar = ir.Var(scope, out_name, loc)", "= \"{}[{}][0]\".format(neighborhood_name, i) hi = \"{}[{}][1]\".format(neighborhood_name, i) ranges.append((lo, hi)) #", "first_arg) # Converts cval to a string constant def cval_as_str(cval):", "= [x + stencil_stub_last_label for x in ret_blocks] if config.DEBUG_ARRAY_OPT", "in stencil_ir afresh var_table = ir_utils.get_name_var_table(stencil_ir.blocks) new_var_dict = {} reserved_names", "loc) tmpname = ir_utils.mk_unique_var(\"stencil_index\") tmpvar = ir.Var(scope, tmpname, loc) stmt_index_var_typ", "sentinel assignment. loc = inst.loc scope = block.scope # split", "types.functions.Dispatcher(sa_func) typemap[sa_var.name] = sa_func_typ g_sa = ir.Global(\"slice_addition\", sa_func, loc) new_body.append(ir.Assign(g_sa,", "kwtys: argtys_extra += (kwtys['neighborhood'],) sig_extra += \", neighborhood=None\" # look", "stmt.value.value if ((isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op in", "if config.DEBUG_ARRAY_OPT >= 1: print(\"remembering in const_dict\", stmt.target.name, stmt.value.value) #", "the kernel by finding the maximum absolute value # index", "+ option) wrapper = _stencil(mode, options) if func is not", "didn't to allocate the array. if result is None: return_type_name", "\"__numba_stencil_%s_%s\" % ( hex(id(the_array)).replace(\"-\", \"_\"), self.id) # We will put", "ir_utils.mk_unique_var(\"getitem\") getitemvar = ir.Var(scope, getitemname, loc) getitemcall = ir.Expr.getitem(stmt_index_var, const_index_vars[dim],", "to the new function. 
for i in range(the_array.ndim): for j", "= sa_func_typ g_sa = ir.Global(\"slice_addition\", sa_func, loc) new_body.append(ir.Assign(g_sa, sa_var, loc))", "as float[:] and flags as bool[:]) # When more than", "loop nest in this new function is a special sentinel", "sentinel # assignment. # 3) Get the IR of this", "index variable # to them and then reconstitute as a", "# 4) Split the block containing the sentinel assignment and", "self._typingctx = registry.cpu_target.typing_context self._targetctx = registry.cpu_target.target_context self._typingctx.refresh() self._targetctx.refresh() self._install_type(self._typingctx) self.neighborhood", "The but of the loop nest in this new function", "return \"-np.inf\" else: return \"np.inf\" else: return str(cval) # If", "dtype=np.{})\\n\".format( out_name, shape_name, return_type_name) func_text += \" \" + out_init", "into the result array. Returns the block labels that contained", "need a copy of the calltypes because copy propagation applied", "ir_utils.dump_blocks(kernel_copy.blocks) # We generate a Numba function to execute this", "isinstance(index, tuple) or isinstance(index, list): for i in range(len(index)): te", "scalar and not a numpy array.\") real_ret = types.npytypes.Array(return_type, argtys[0].ndim,", "copy of the kernel # and if the original statement", "stencil_dummy_lower(context, builder, sig, args): \"lowering for dummy stencil calls\" return", "\"Stencil kernel must return a scalar and not a numpy", "the current block gets a new label. body_first_label = min(kernel_copy.blocks.keys())", "loc)) # Add all the parfor loop body blocks to", "array_types, args, kwargs) (real_ret, typemap, calltypes) = self.get_return_type(array_types) new_func =", "sig, args): cres = self.stencilFunc.compile_for_argtys(sig.args, {}, sig.return_type, None) res =", "block containing the sentinel assignment and remove the sentinel #", "the array. 
for dim in range(ndim): tmpname = ir_utils.mk_unique_var(\"const_index\") tmpvar", "ValueError(\"%d dimensional neighborhood specified for %d \" \\ \"dimensional input", "__numba_dummy_stencil({}{}):\\n pass\\n\".format( \",\".join(self.kernel_ir.arg_names), sig_extra)) exec(dummy_text) in globals(), locals() dummy_func =", "# We generate a Numba function to execute this stencil", "but the new block maintains the current block # label.", "= ir.Var(scope, s_index_name, loc) # Build a tuple from the", "space. ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks, index_vars, out_name) if config.DEBUG_ARRAY_OPT >= 1:", "= min(neighborhood[i][0], te) neighborhood[i][1] = max(neighborhood[i][1], te) else: raise NumbaValueError(", "hasattr(stmt_index_var, 'name') if stmt_index_var.name in tuple_table: kernel_consts += [tuple_table[stmt_index_var.name]] elif", "function to execute this stencil and here # create the", "name_var_table) func_text += \" {} = {}.shape\\n\".format(shape_name, first_arg) # Converts", "kernel_copy.blocks = ir_utils.add_offset_to_labels( kernel_copy.blocks, stencil_stub_last_label) new_label = max(kernel_copy.blocks.keys()) + 1", "!= args[0].ndim): raise ValueError(\"{} dimensional neighborhood specified for {} \"", "stmt.value.value.name not in standard_indexed): # We found a getitem from", "self.stencilFunc = sf def __call__(self, context, builder, sig, args): cres", "size of the stencil kernel and a list of the", "types.npytypes.Array): raise NumbaValueError( \"Stencil kernel must return a scalar and", "stencil function # that will execute the stencil kernel. This", "to form the new function to execute the stencil kernel.", "\" \" of dimensions as the first stencil input.\") argshape", "# The stencil kernel body becomes the body of a", "= list(array_types) if config.DEBUG_ARRAY_OPT >= 1: print(\"new_stencil_param_types\", new_stencil_param_types) ir_utils.dump_blocks(stencil_ir.blocks) #", "loop below. 
Without literal_unroll, their # types have to match.", "of this new function. # 4) Split the block containing", "their # types have to match. # An example failing", "CPU context currently self._typingctx = registry.cpu_target.typing_context self._targetctx = registry.cpu_target.target_context self._typingctx.refresh()", "\"be the primary input array.\") from numba.core import typed_passes typemap,", "add the loop index to a user-specified slice. \"\"\" return", "addend, the_slice.stop + addend) class StencilFunc(object): \"\"\" A special type", "loc) prev_block.body = block.body[:i] # The current block is used", "look in the type cache first if argtys_extra in self._type_cache:", "+= \" \" + out_init else: # result is present,", "dummy_func = eval(\"__numba_dummy_stencil\") sig = sig.replace(pysig=utils.pysignature(dummy_func)) self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)]) self._type_cache[argtys_extra]", "neighborhood specified for %d \" \\ \"dimensional input array\" %", "= {} kernel_copy = ir.copy() kernel_copy.blocks = {} # For", "args) ir_utils.dump_blocks(kernel_copy.blocks) # We generate a Numba function to execute", "IR + stencil kernel IR into existence. 
# Copy the", "= ir.Var(scope, index_names[0], loc) tmpname = ir_utils.mk_unique_var(\"stencil_index\") tmpvar = ir.Var(scope,", "min(kernel_copy.blocks.keys()) # The previous block jumps to the minimum labelled", "# Convert the string names of the index variables into", "new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else: index_vars = []", "index_var = ir.Var(scope, one_var, loc) var_index_vars += [index_var] s_index_name =", "ir.Global(\"slice_addition\", sa_func, loc) new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [getitemvar,", "mode, options) return decorated @lower_builtin(stencil) def stencil_dummy_lower(context, builder, sig, args):", "1: func_text += \" raise_if_incompatible_array_sizes(\" + first_arg for other_array in", "= options self.kws = [] # remember original kws arguments", "allowed.\") if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op in", "input array\" % (len(self.neighborhood), argtys[0].ndim)) argtys_extra = argtys sig_extra =", "must return a scalar and not a numpy array.\") real_ret", "for the # stencil kernel body. func_text += \"{} =", "= ir_utils.mk_unique_var(\"const_index\") tmpvar = ir.Var(scope, tmpname, loc) new_body.append(ir.Assign(ir.Const(dim, loc), tmpvar,", "loc = inst.loc scope = block.scope # split block across", "array. for dim in range(ndim): tmpname = ir_utils.mk_unique_var(\"const_index\") tmpvar =", "# nests across the dimensions of the input array. Those", "it with the IR for the # stencil kernel body.", "this dimension whose # use is precluded. 
# ranges[i][1] is", "loc) sa_func = numba.njit(slice_addition) sa_func_typ = types.functions.Dispatcher(sa_func) typemap[sa_var.name] = sa_func_typ", "dimensional neighborhood specified for %d \" \\ \"dimensional input array\"", "return_type, sigret): # look in the type cache to find", "IR and replace them with a SetItem call of the", "typemap) name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks) ir_utils.apply_copy_propagate( kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes)", "{}=None\".format(neighborhood_name) # Get a list of the standard indexed array", "stmt in block.body: if isinstance(stmt, ir.Return): ret_blocks.append(label) # If 1D", "= ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc) # Build a", "a string-repr numerical const, issue #7286 if np.isnan(cval): return \"np.nan\"", "different types that are not compatible # (e.g. values as", "in var_table.items(): if not name in reserved_names: new_var_dict[name] = ir_utils.mk_unique_var(name)", "print(\"__call__\", array_types, args, kwargs) (real_ret, typemap, calltypes) = self.get_return_type(array_types) new_func", "\"\"\" copy_calltypes = {} kernel_copy = ir.copy() kernel_copy.blocks = {}", "index_var, loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target,", "index_vars: index_var = ir.Var(scope, one_var, loc) var_index_vars += [index_var] s_index_name", "function # that will execute the stencil kernel. 
This function", "index) index_len = 1 else: raise NumbaValueError( \"Non-tuple or non-integer", "does not have same number \" \" of dimensions as", "a stencil kernel must \" \"be the primary input array.\")", "prior to the # sentinel but the new block maintains", "if config.DEBUG_ARRAY_OPT >= 1: print(\"add_indices_to_kernel\", ndim, neighborhood) ir_utils.dump_blocks(kernel.blocks) if neighborhood", "have to add the index value with a call to", "typing context. \"\"\" _ty_cls = type('StencilFuncTyping_' + str(self.id), (AbstractTemplate,), dict(key=self,", "of the getitem calls. So, in effect array[-1] becomes array[index0-1].", "one_var in index_vars: index_var = ir.Var(scope, one_var, loc) var_index_vars +=", "to a specific StencilFunc. ''' def __init__(self, sf): self.stencilFunc =", "typemap, calltypes) def _install_type(self, typingctx): \"\"\"Constructs and installs a typing", "primary input array.\") from numba.core import typed_passes typemap, return_type, calltypes,", "locals() dummy_func = eval(\"__numba_dummy_stencil\") sig = sig.replace(pysig=utils.pysignature(dummy_func)) self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)])", "'''Callable class responsible for lowering calls to a specific StencilFunc.", "const_dict: te = const_dict[te.name] if isinstance(te, int): neighborhood[i][0] = min(neighborhood[i][0],", "stmt.target, loc)) else: acc_call = ir.Expr.binop(operator.add, stmt_index_var, index_var, loc) new_body.append(ir.Assign(acc_call,", "are more than one relatively indexed arrays, add a call", "ir_utils.remove_dels(stencil_ir.blocks) assert(isinstance(the_array, types.Type)) array_types = args new_stencil_param_types = list(array_types) if", "in ['setitem', 'static_setitem'] and stmt.value.value.name in kernel.arg_names) or (isinstance(stmt, ir.SetItem)", "= ir_utils.get_name_var_table(kernel_copy.blocks) ir_utils.apply_copy_propagate( kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes) if \"out\"", "stencil 
function with the replaced loop # body in it.", "not \" \"constant, 'neighborhood' option required\") if ndim == 1:", "len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0: raise NumbaValueError(\"Standard indexing requested for", "tmpvar, loc)) else: acc_call = ir.Expr.binop(operator.add, getitemvar, index_vars[dim], loc) new_body.append(ir.Assign(acc_call,", "conflict with any labels in the stencil_ir. kernel_copy.blocks = ir_utils.add_offset_to_labels(", "copy.deepcopy(stmt) new_block.body.append(scopy) if stmt in calltypes: copy_calltypes[scopy] = calltypes[stmt] kernel_copy.blocks[block_label]", "is passed (_, result, typemap, calltypes) = self._type_cache[argtys] new_func =", "(kwtys['neighborhood'],) sig_extra += \", neighborhood=None\" # look in the type", "\" {} = {}.shape\\n\".format(shape_name, first_arg) # Converts cval to a", "# remember original kws arguments # stencils only supported for", "= sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,", "the stencil kernel IR to prevent # conflicts with the", "loc) new_body.append(ir.Assign(ir.Const(dim, loc), tmpvar, loc)) const_index_vars += [tmpvar] index_var =", "stencil array does not have same number \" \" of", "array dimensionality.\") return (neighborhood, relatively_indexed) def get_return_type(self, argtys): if config.DEBUG_ARRAY_OPT", "specified by the user into one that includes each dimension's", "we create the name for # the index variable for", "= self.add_indices_to_kernel( kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed, typemap, copy_calltypes) if", "the stencil function IR. 
# 5) Compile the combined stencil", "than one relatively indexed arrays, add a call to #", "for x in args]) array_types_full = array_types if config.DEBUG_ARRAY_OPT >=", "stencil option \" + option) wrapper = _stencil(mode, options) if", "the relatively indexed # arrays are of different size than", "ivar, stmt.value, loc)) else: # Convert the string names of", "int): neighborhood[i][0] = min(neighborhood[i][0], te) neighborhood[i][1] = max(neighborhood[i][1], te) else:", "pass\\n\".format( \",\".join(self.kernel_ir.arg_names), sig_extra)) exec(dummy_text) in globals(), locals() dummy_func = eval(\"__numba_dummy_stencil\")", "the iteration space. ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks, index_vars, out_name) if config.DEBUG_ARRAY_OPT", "needed. ir_utils.remove_args(kernel_copy.blocks) first_arg = kernel_copy.arg_names[0] in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)", "The previous block jumps to the minimum labelled block of", "ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op in ['getitem', 'static_getitem'] and", "IR along with its calltype information. We need a copy", "cval to a string constant def cval_as_str(cval): if not np.isfinite(cval):", "args]) array_types_full = array_types if config.DEBUG_ARRAY_OPT >= 1: print(\"__call__\", array_types,", "ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) new_func = compiler.compile_ir( self._typingctx, self._targetctx, stencil_ir, new_stencil_param_types, None, compiler.DEFAULT_FLAGS,", "config.DEBUG_ARRAY_OPT >= 1: print(\"remembering in const_dict\", stmt.target.name, stmt.value.value) # Remember", "replace_return_with_setitem\", ret_blocks) ir_utils.dump_blocks(kernel_copy.blocks) # Start to form the new function", "locate it in the IR. 
We will # remove this", "new_block.body.append(scopy) if stmt in calltypes: copy_calltypes[scopy] = calltypes[stmt] kernel_copy.blocks[block_label] =", "kernel as specified by the user into one that includes", "np from llvmlite import ir as lir from numba.core import", "index variables to getitems in the IR to transition the", "index[i] if isinstance(te, ir.Var) and te.name in const_dict: te =", "return decorated @lower_builtin(stencil) def stencil_dummy_lower(context, builder, sig, args): \"lowering for", "sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) else: acc_call =", "\" \" + out_init else: # result is present, if", "loc) new_body.append(si) else: new_body.append(stmt) block.body = new_body return ret_blocks def", "the size of the kernel by finding the maximum absolute", "up the variable in # the const dictionary. if need_to_calc_kernel:", "tmpvar, loc)) tuple_call = ir.Expr.build_tuple(ind_stencils, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) new_body.append(ir.Assign(", "to get the positive offset in this dimension whose #", "statements prior to the # sentinel but the new block", "ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else: acc_call = ir.Expr.binop(operator.add, stmt_index_var,", "calltypes): \"\"\" Create a copy of a given IR along", "stencil_ir.blocks[ret_block].append( ir.Jump(new_label, loc)) break else: continue break stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)", "blocks.items(): scope = block.scope loc = block.loc new_body = []", "name, var in var_table.items(): if not name in reserved_names: new_var_dict[name]", "gufunc # function's IR. for (l, b) in kernel_copy.blocks.items(): stencil_ir.blocks[l]", "subsequent uses of the original IR invalid. 
\"\"\" copy_calltypes =", "context currently self._typingctx = registry.cpu_target.typing_context self._targetctx = registry.cpu_target.target_context self._typingctx.refresh() self._targetctx.refresh()", "match. # An example failing signature without literal_unroll might be", "1 self.kernel_ir = kernel_ir self.mode = mode self.options = options", "\"\"\" Create a copy of a given IR along with", "return ret_blocks def add_indices_to_kernel(self, kernel, index_names, ndim, neighborhood, standard_indexed, typemap,", "else: new_body.append(stmt) block.body = new_body return ret_blocks def add_indices_to_kernel(self, kernel,", "argshape = arg.shape for i in range(len(ashape)): if ashape[i] >", "np.full({}, {}, dtype=np.{})\\n\".format( out_name, shape_name, cval_as_str(cval), return_type_name) else: out_init =\"{}", "i) hi = \"{}[{}][1]\".format(neighborhood_name, i) ranges.append((lo, hi)) # If there", "to a string constant def cval_as_str(cval): if not np.isfinite(cval): #", "stencil func text\") print(func_text) # Force the new stencil function", "== sentinel_name): # We found the sentinel assignment. loc =", "it in the IR. We will # remove this sentinel", "present in the stencil kernel definition.\") # Add index variables", "None and len(self.neighborhood) != argtys[0].ndim): raise NumbaValueError(\"%d dimensional neighborhood specified", "self.get_return_type(array_types) new_func = self._stencil_wrapper(result, None, real_ret, typemap, calltypes, *array_types_full) if", "config.DEBUG_ARRAY_OPT >= 1: print(\"name_var_table\", name_var_table, sentinel_name) the_array = args[0] if", "typingctx): \"\"\"Constructs and installs a typing class for a StencilFunc", "for the typing class built by StencilFunc._install_type(). 
Return the call-site", "the calltypes and make subsequent uses of the original IR", "new_func = self._stencil_wrapper(result, None, real_ret, typemap, calltypes, *array_types_full) if result", "execute this stencil and here # create the unique name", "result_type = types.npytypes.Array(rttype, result.ndim, numpy_support.map_layout(result)) array_types = tuple([typing.typeof.typeof(x) for x", "#raise ValueError(\"Unexpected static_getitem in add_indices_to_kernel.\") relatively_indexed.add(stmt.value.value.name) # Store the index", "block.body[:i] # The current block is used for statements after", "standard indexed array names. standard_indexed = self.options.get(\"standard_indexing\", []) if first_arg", ">= 1: print(\"ret_blocks w/ offsets\", ret_blocks, stencil_stub_last_label) print(\"before replace sentinel", "else: raise NumbaValueError( \"stencil kernel index is not constant,\" \"'neighborhood'", "the parameters to the stencil kernel, loop # nests across", "us numpy.full if the user specified a cval stencil decorator", "kernel.arg_names and stmt.value.value.name not in standard_indexed): # We found a", "or isinstance(index, list): for i in range(len(index)): te = index[i]", "stencil function into existence. exec(func_text) in globals(), locals() stencil_func =", "the original statement is in the original # calltypes then", "types.npytypes.Array(rttype, result.ndim, numpy_support.map_layout(result)) array_types = tuple([typing.typeof.typeof(x) for x in args])", "\"neighborhood\" in dict(self.kws): sig_extra += \", {}=None\".format(neighborhood_name) # Get a", "ir_utils.copy_propagate(kernel_copy.blocks, typemap) name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks) ir_utils.apply_copy_propagate( kernel_copy.blocks, in_cps, name_var_table, typemap,", "index.\") if index_len != ndim: raise NumbaValueError( \"Stencil index does", "IR to transition the accesses # in the kernel from", "be needed. 
# 2) The but of the loop nest", "+ 1 # Shift labels in the kernel copy so", "part of the index tuple. if isinstance(stmt_index_var_typ, types.ConstSized): one_index_typ =", "dimension # but minimum's greater than 0 don't preclude any", "= {} reserved_names = ([sentinel_name, out_name, neighborhood_name, shape_name] + kernel_copy.arg_names", "stencil kernel must \" \"be the primary input array.\") from", "and neighborhood. out_name = ir_utils.get_unused_var_name(\"out\", name_var_table) neighborhood_name = ir_utils.get_unused_var_name(\"neighborhood\", name_var_table)", "ret_blocks def add_indices_to_kernel(self, kernel, index_names, ndim, neighborhood, standard_indexed, typemap, calltypes):", "tmpvar = ir.Var(scope, tmpname, loc) stmt_index_var_typ = typemap[stmt_index_var.name] # If", "def _install_type(self, typingctx): \"\"\"Constructs and installs a typing class for", "in self._type_cache: (_sig, _, _, _) = self._type_cache[argtys_extra] return _sig", "\"{}[{}][1]\".format(neighborhood_name, i) ranges.append((lo, hi)) # If there are more than", "the # sentinel but the new block maintains the current", "calltypes because copy propagation applied to the copied IR will", "extra neighborhood # argument to the function. ranges = []", "# If there are more than one relatively indexed arrays,", "0 and the observed maximum index # in this dimension", "for arg in literal_unroll(args): if a.ndim != arg.ndim: raise ValueError(\"Secondary", "user specified a cval stencil decorator option # or np.zeros", "slice(the_slice.start + addend, the_slice.stop + addend) class StencilFunc(object): \"\"\" A", "first input array. shape_name = ir_utils.get_unused_var_name(\"full_shape\", name_var_table) func_text += \"", "to # the current absolute location in index0. index_var =", "addition of the offset. 
ret_blocks = [x + stencil_stub_last_label for", "float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail) for arg in", "of the value \"returned\" by the kernel into the result", "the corresponding index variable # to them and then reconstitute", "dimension always has index variable 'index0'. # tmpvar will hold", "don't conflict with any labels in the stencil_ir. kernel_copy.blocks =", "of the index tuple. if isinstance(stmt_index_var_typ, types.ConstSized): one_index_typ = stmt_index_var_typ[dim]", "if np.isnan(cval): return \"np.nan\" elif np.isinf(cval): if cval < 0:", "passed to stencil \" \\ \"kernels is not allowed.\") if", "(AbstractTemplate,), dict(key=self, generic=self._type_me)) typingctx.insert_user_function(self, _ty_cls) def compile_for_argtys(self, argtys, kwtys, return_type,", "None: sig_extra += \", {}=None\".format(out_name) if \"neighborhood\" in dict(self.kws): sig_extra", "array.\") from numba.core import typed_passes typemap, return_type, calltypes, _ =", "index_vars[dim]], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call,", "in stencil kernels.\") sentinel_name = ir_utils.get_unused_var_name(\"__sentinel__\", name_var_table) if config.DEBUG_ARRAY_OPT >=", "tmpname, loc) new_body.append(ir.Assign(ir.Const(dim, loc), tmpvar, loc)) const_index_vars += [tmpvar] index_var", "index used in the kernel specification. neighborhood = [[0,0] for", "Force the new stencil function into existence. exec(func_text) in globals(),", "a setitem for that # particular point in the iteration", "= stmt.value.index_var # allow static_getitem since rewrite passes are applied", "one_index_typ = stmt_index_var_typ[:] # If the array is indexed with", "special sentinel # assignment. # 3) Get the IR of", "aren't needed. 
ir_utils.remove_args(kernel_copy.blocks) first_arg = kernel_copy.arg_names[0] in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks,", "None: return_type_name = numpy_support.as_dtype( return_type.dtype).type.__name__ if \"cval\" in self.options: cval", "func_text = \"def {}({}{}):\\n\".format(stencil_func_name, \",\".join(kernel_copy.arg_names), sig_extra) # Get loop ranges", "We will put a loop nest in the generated function", "= [] for one_var in index_vars: index_var = ir.Var(scope, one_var,", "array_types = tuple([typing.typeof.typeof(x) for x in args]) array_types_full = array_types", "standard_indexed: raise NumbaValueError(\"The first argument to a stencil kernel must", "1: print(\"ret_blocks w/ offsets\", ret_blocks, stencil_stub_last_label) print(\"before replace sentinel stencil_ir\")", "to the function. ranges = [] for i in range(the_array.ndim):", "# then us numpy.full if the user specified a cval", "else: index_vars = [] sum_results = [] s_index_name = ir_utils.mk_unique_var(\"stencil_index\")", "range(-min(0,{}),\" \"{}[{}]-max(0,{})):\\n\").format( index_vars[i], ranges[i][0], shape_name, i, ranges[i][1]) offset += 1", "# We need literal_unroll here because the stencil might take", "not to try to compute elements where # elements outside", "don't preclude any entry in the array. # So, take", "extra signature for out and neighborhood. 
out_name = ir_utils.get_unused_var_name(\"out\", name_var_table)", "into one that includes each dimension's index variable as part", "# 1) Construct a string containing a function definition for", "if ashape[i] > argshape[i]: raise ValueError(\"Secondary stencil array has some", "= kernel_copy.arg_names[0] in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap) name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)", "add a call to # a function that will raise", "slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx,", "In the latter case we'll use the extra neighborhood #", "positive offset in this dimension whose # use is precluded.", "= [] stmt_index_var_typ = typemap[stmt_index_var.name] # Same idea as above", "= self._type_cache[argtys_extra] return _sig (real_ret, typemap, calltypes) = self.get_return_type(argtys) sig", "sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func = numba.njit(slice_addition) sa_func_typ =", "typemap, calltypes, *args): # Overall approach: # 1) Construct a", "# after label and variable renaming of the stencil kernel", "tuple([typing.typeof.typeof(x) for x in args] + [result_type]) else: result =", "a slice then we # have to add the index", "if need_to_calc_kernel: # Find the size of the kernel by", "result, typemap, calltypes) return sig def copy_ir_with_calltypes(self, ir, calltypes): \"\"\"", "typing class built by StencilFunc._install_type(). Return the call-site signature. 
\"\"\"", "real_ret, typemap, calltypes, *array_types_full) if result is None: return new_func.entry_point(*args)", "non-integer used as stencil index.\") if index_len != ndim: raise", "4) Split the block containing the sentinel assignment and remove", "= func_or_mode func = None for option in options: if", "# multiple input arrays with different types that are not", "dimensions as the first stencil input.\") argshape = arg.shape for", "\"stencil input.\") def slice_addition(the_slice, addend): \"\"\" Called by stencil in", "new_body.append(stmt) block.body = new_body return ret_blocks def add_indices_to_kernel(self, kernel, index_names,", "stencil_ir = compiler.run_frontend(stencil_func) ir_utils.remove_dels(stencil_ir.blocks) # rename all variables in stencil_ir", "list): for i in range(len(index)): te = index[i] if isinstance(te,", "\"kernels is not allowed.\") if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr)", "block labels that contained return statements. \"\"\" ret_blocks = []", "on function without specifying mode style if not isinstance(func_or_mode, str):", "+= \" \" + out_init offset = 1 # Add", "'static_setitem'] and stmt.value.value.name in kernel.arg_names) or (isinstance(stmt, ir.SetItem) and stmt.target.name", "iterated over in the loop below. Without literal_unroll, their #", "(real_ret, typemap, calltypes) def _install_type(self, typingctx): \"\"\"Constructs and installs a", "def copy_ir_with_calltypes(self, ir, calltypes): \"\"\" Create a copy of a", "copy_calltypes) if \"out\" in name_var_table: raise NumbaValueError(\"Cannot use the reserved", "if config.DEBUG_ARRAY_OPT >= 1: print(\"__call__\", array_types, args, kwargs) (real_ret, typemap,", "Remember consts for use later. 
const_dict[stmt.target.name] = stmt.value.value if ((isinstance(stmt,", "# Get the IR for the newly created stencil function.", "[]) if first_arg in standard_indexed: raise NumbaValueError(\"The first argument to", "corresponding index variable # to them and then reconstitute as", "index_len = 1 else: raise NumbaValueError( \"Non-tuple or non-integer used", "if 'out' in kwargs: result = kwargs['out'] rdtype = result.dtype", "# # Copyright (c) 2017 Intel Corporation # SPDX-License-Identifier: BSD-2-Clause", "ranges[i][1]) offset += 1 for j in range(offset): func_text +=", "statement to the new copy of the kernel # and", "number \" \" of dimensions as the first stencil input.\")", "statements in the IR and replace them with a SetItem", "var_index_vars = [] for one_var in index_vars: index_var = ir.Var(scope,", "class for a StencilFunc object in the input typing context.", "kernel_consts = [] if config.DEBUG_ARRAY_OPT >= 1: print(\"add_indices_to_kernel\", ndim, neighborhood)", "= ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ,", "might be # (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't", "# adding the relative offset in stmt.value.index to # the", "print(\"_stencil_wrapper\", return_type, return_type.dtype, type(return_type.dtype), args) ir_utils.dump_blocks(kernel_copy.blocks) # We generate a", "tuple_table = ir_utils.get_tuple_table(kernel.blocks) relatively_indexed = set() for block in kernel.blocks.values():", "for each # dimension in the input array. 
Here we", "- set(kernel_copy.arg_names)) != 0: raise NumbaValueError(\"Standard indexing requested for an", "# computed size of the stencil kernel and a list", "prev_block = ir.Block(scope, loc) prev_block.body = block.body[:i] # The current", "then reconstitute as a tuple that can # index the", "\" # ranges[i][0] is the minimum index used in the", "ir.Var(scope, index_vars[0], loc) new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc)) else: # Convert", "the sentinel assignment and remove the sentinel # assignment. Insert", "(kernel_copy, copy_calltypes) def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):", "kernel_consts += [tuple_table[stmt_index_var.name]] elif stmt_index_var.name in const_dict: kernel_consts += [const_dict[stmt_index_var.name]]", "eval(stencil_func_name) if sigret is not None: pysig = utils.pysignature(stencil_func) sigret.pysig", "scope = block.scope # split block across __sentinel__ # A", "original IR invalid. \"\"\" copy_calltypes = {} kernel_copy = ir.copy()", "in the first \" \"stencil input.\") def slice_addition(the_slice, addend): \"\"\"", "labelled block of # the parfor body. prev_block.append(ir.Jump(body_first_label, loc)) #", "code so we can locate it in the IR. We", "the sentinel. 
for ret_block in ret_blocks: stencil_ir.blocks[ret_block].append( ir.Jump(new_label, loc)) break", "stmt_index_var_typ[dim] else: one_index_typ = stmt_index_var_typ[:] # If the array is", "the string names of the index variables into # ir.Var's.", "the i'th dimension # but minimum's greater than 0 don't", "self.options[\"cval\"] if return_type.dtype != typing.typeof.typeof(cval): msg = \"cval type does", "them and then reconstitute as a tuple that can #", "since rewrite passes are applied #raise ValueError(\"Unexpected static_getitem in add_indices_to_kernel.\")", "= compiler.run_frontend(stencil_func) ir_utils.remove_dels(stencil_ir.blocks) # rename all variables in stencil_ir afresh", "form the new function to execute the stencil kernel. func_text", "body_first_label = min(kernel_copy.blocks.keys()) # The previous block jumps to the", "the stencil kernel. func_text = \"def {}({}{}):\\n\".format(stencil_func_name, \",\".join(kernel_copy.arg_names), sig_extra) #", "+ 1 # Adjust ret_blocks to account for addition of", "the same dimension in the first \" \"stencil input.\") def", "# in the kernel from relative to regular Python indexing.", "self.neighborhood, standard_indexed, typemap, copy_calltypes) if self.neighborhood is None: self.neighborhood =", "if len(relatively_indexed) > 1: func_text += \" raise_if_incompatible_array_sizes(\" + first_arg", "and then reconstitute as a tuple that can # index", "requested for an array name \" \"not present in the", "ranges = [] for i in range(the_array.ndim): if isinstance(kernel_size[i][0], int):", "= func_or_mode else: mode = func_or_mode func = None for", "argtys): if config.DEBUG_ARRAY_OPT >= 1: print(\"get_return_type\", argtys) ir_utils.dump_blocks(self.kernel_ir.blocks) if not", "ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks, index_vars, out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"After", "args[0] if config.DEBUG_ARRAY_OPT >= 1: print(\"_stencil_wrapper\", return_type, return_type.dtype, 
type(return_type.dtype), args)", "in the iteration space. ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks, index_vars, out_name) if", "the minimum of 0 and the minimum index found in", "args]) array_types_full = tuple([typing.typeof.typeof(x) for x in args] + [result_type])", "first argument to a stencil kernel must \" \"use relative", "NumbaValueError from numba.misc.special import literal_unroll import numba import operator from", "of this function. stencil_func_name = \"__numba_stencil_%s_%s\" % ( hex(id(the_array)).replace(\"-\", \"_\"),", "the index used after looking up the variable in #", "loc)) rvar = ir.Var(scope, out_name, loc) # Write the return", "different size than the first input array. if len(relatively_indexed) >", "= [] for stmt in block.body: if (isinstance(stmt, ir.Assign) and", "a specific StencilFunc. ''' def __init__(self, sf): self.stencilFunc = sf", "import lower_builtin from numba.core.extending import register_jitable from numba.core.errors import NumbaValueError", "sentinel assignment and remove the sentinel # assignment. 
Insert the", "% ( hex(id(the_array)).replace(\"-\", \"_\"), self.id) # We will put a", "for (l, b) in kernel_copy.blocks.items(): stencil_ir.blocks[l] = b stencil_ir.blocks[new_label] =", "ir_utils.get_name_var_table(stencil_ir.blocks) new_var_dict = {} reserved_names = ([sentinel_name, out_name, neighborhood_name, shape_name]", "sig, args): \"lowering for dummy stencil calls\" return lir.Constant(lir.IntType(types.intp.bitwidth), 0)", "_stencil(mode, options): if mode != 'constant': raise ValueError(\"Unsupported mode style", "new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc)", "loc)) tuple_call = ir.Expr.build_tuple(ind_stencils, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,s_index_var,loc),", "and if the original statement is in the original #", "+ out_init offset = 1 # Add the loop nests", "s_index_var, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,s_index_var,loc), stmt.target,loc)) else: new_body.append(stmt) block.body = new_body", "slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx,", "iteration space. 
ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks, index_vars, out_name) if config.DEBUG_ARRAY_OPT >=", "types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target,", "if \"out\" in name_var_table: raise NumbaValueError(\"Cannot use the reserved word", "if first_arg in standard_indexed: raise NumbaValueError(\"The first argument to a", "= mode self.options = options self.kws = [] # remember", "and not a numpy array.\") real_ret = types.npytypes.Array(return_type, argtys[0].ndim, argtys[0].layout)", "for label, block in blocks.items(): scope = block.scope loc =", "self.options: cval = self.options[\"cval\"] if return_type.dtype != typing.typeof.typeof(cval): msg =", "the positive offset in this dimension whose # use is", "is present, if cval is set then use it if", "_install_type(self, typingctx): \"\"\"Constructs and installs a typing class for a", "the offset. ret_blocks = [x + stencil_stub_last_label for x in", "by the user into one that includes each dimension's index", "to the block # containing statements after the sentinel. for", "NumbaValueError(\"Standard indexing requested for an array name \" \"not present", "len(self.neighborhood) != args[0].ndim): raise ValueError(\"{} dimensional neighborhood specified for {}", "the IR for the # stencil kernel body. func_text +=", "isinstance(te, ir.Var) and te.name in const_dict: te = const_dict[te.name] if", "stmt_index_var_typ = typemap[stmt_index_var.name] # Same idea as above but you", "sentinel # assignment. 
Insert the stencil kernel IR into the", "ir.SetItem) and stmt.target.name in kernel.arg_names)): raise ValueError(\"Assignments to arrays passed", "# ranges[i][1] is the maximum of 0 and the observed", "kernel_copy.arg_names[0] in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap) name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks) ir_utils.apply_copy_propagate(", "shape of the first input array. shape_name = ir_utils.get_unused_var_name(\"full_shape\", name_var_table)", "new_body = [] for stmt in block.body: if isinstance(stmt, ir.Return):", "g_sa = ir.Global(\"slice_addition\", sa_func, loc) new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call =", "entry in the array from being used. func_text += (\"for", "stmt.value.op == 'getitem': stmt_index_var = stmt.value.index else: stmt_index_var = stmt.value.index_var", "kernel must return a scalar and not a numpy array.\")", "index_vars, the_array.ndim, self.neighborhood, standard_indexed, typemap, copy_calltypes) if self.neighborhood is None:", "['getitem', 'static_getitem'] and stmt.value.value.name in kernel.arg_names and stmt.value.value.name not in", "that are not compatible # (e.g. values as float[:] and", "locals() stencil_func = eval(stencil_func_name) if sigret is not None: pysig", "def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args): # Overall", "ashape = a.shape # We need literal_unroll here because the", "return _sig (real_ret, typemap, calltypes) = self.get_return_type(argtys) sig = signature(real_ret,", "the new function to execute the stencil kernel. 
func_text =", "# When more than three total arrays are given, the", "and replace it with the IR for the # stencil", "(sig, result, typemap, calltypes) return sig def copy_ir_with_calltypes(self, ir, calltypes):", "ir.Return): ret_blocks.append(label) # If 1D array then avoid the tuple", "block) in ir.blocks.items(): new_block = copy.deepcopy(ir.blocks[block_label]) new_block.body = [] #", "of the loop nest in this new function is a", "{} reserved_names = ([sentinel_name, out_name, neighborhood_name, shape_name] + kernel_copy.arg_names +", "*args, **kwargs): if (self.neighborhood is not None and len(self.neighborhood) !=", "Python mode to add the loop index to a user-specified", "isinstance(return_type, types.npytypes.Array): raise NumbaValueError( \"Stencil kernel must return a scalar", "standard indexing.\") if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0: raise NumbaValueError(\"Standard", "the maximum of 0 and the observed maximum index #", "= {} # For each block... for (block_label, block) in", "index_vars[dim], loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) tuple_call = ir.Expr.build_tuple(ind_stencils, loc) new_body.append(ir.Assign(tuple_call,", "self.mode = mode self.options = options self.kws = [] #", "\"Non-tuple or non-integer used as stencil index.\") if index_len !=", "Intel Corporation # SPDX-License-Identifier: BSD-2-Clause # import copy import numpy", "stmt_index_var_typ[:] # If the array is indexed with a slice", "index_vars += [index_var_name] # Create extra signature for out and", "raise NumbaValueError(\"Standard indexing requested for an array name \" \"not", "# (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail) for", "= ir_utils.get_name_var_table(stencil_ir.blocks) new_var_dict = {} reserved_names = ([sentinel_name, out_name, neighborhood_name,", "'constant': raise ValueError(\"Unsupported mode style \" + mode) def decorated(func):", "allocate the output array (the out argument was not used)", 
"new_body.append(si) else: new_body.append(stmt) block.body = new_body return ret_blocks def add_indices_to_kernel(self,", "1: print(\"add_indices_to_kernel\", ndim, neighborhood) ir_utils.dump_blocks(kernel.blocks) if neighborhood is None: need_to_calc_kernel", "ValueError(\"{} dimensional neighborhood specified for {} \" \"dimensional input array\".format(", "# We found a getitem from the input array. if", "(real_ret, typemap, calltypes) = self.get_return_type(array_types) new_func = self._stencil_wrapper(result, None, real_ret,", "'neighborhood' option required\") if ndim == 1: # Single dimension", "if config.DEBUG_ARRAY_OPT >= 1: print(\"_stencil_wrapper\", return_type, return_type.dtype, type(return_type.dtype), args) ir_utils.dump_blocks(kernel_copy.blocks)", "=\"{} = np.zeros({}, dtype=np.{})\\n\".format( out_name, shape_name, return_type_name) func_text += \"", "# look in the type cache to find if result", "index_names, ndim, neighborhood, standard_indexed, typemap, calltypes): \"\"\" Transforms the stencil", "input array would be needed. # 2) The but of", "the function. ranges = [] for i in range(the_array.ndim): if", "= ir.Expr.build_tuple(var_index_vars, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) rvar = ir.Var(scope, out_name,", "Get the IR of this new function. # 4) Split", "{}\\n\".format(out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"new stencil func text\") print(func_text)", "signature. \"\"\" if (self.neighborhood is not None and len(self.neighborhood) !=", "# 3) Get the IR of this new function. #", "then us numpy.full if the user specified a cval stencil", "stencil kernel IR into existence. # Copy the kernel so", "\"cval\" in self.options: cval = self.options[\"cval\"] if return_type.dtype != typing.typeof.typeof(cval):", "addend) class StencilFunc(object): \"\"\" A special type to hold stencil", "copy. scopy = copy.deepcopy(stmt) new_block.body.append(scopy) if stmt in calltypes: copy_calltypes[scopy]", "result array. 
Returns the block labels that contained return statements.", "# individual elements out of the tuple indexing # expression", "= block.scope loc = block.loc new_body = [] for stmt", "range(ndim)] if len(kernel_consts) == 0: raise NumbaValueError(\"Stencil kernel with no", "\", out=None\" result = kwtys['out'] if 'neighborhood' in kwtys: argtys_extra", "loop nests to the new function. for i in range(the_array.ndim):", "get the positive offset in this dimension whose # use", "= ir.Var(scope, tmpname, loc) ind_stencils += [tmpvar] getitemname = ir_utils.mk_unique_var(\"getitem\")", "with its calltype information. We need a copy of the", "loc) ivar = ir.Var(scope, index_vars[0], loc) new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc))", "self._typingctx, self._targetctx, self.kernel_ir, argtys, None, {}) if isinstance(return_type, types.npytypes.Array): raise", "sig.return_type, None) res = context.call_internal(builder, cres.fndesc, sig, args) context.add_linking_libs([cres.library]) return", "ir.Expr.getitem(stmt.value.value,s_index_var,loc), stmt.target,loc)) else: new_body.append(stmt) block.body = new_body if need_to_calc_kernel: #", "# preclude any entry in the array from being used.", "__init__(self, kernel_ir, mode, options): self.id = type(self).id_counter type(self).id_counter += 1", "numba.misc.special import literal_unroll import numba import operator from numba.np import", "in reserved_names: new_var_dict[name] = ir_utils.mk_unique_var(name) ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict) stencil_stub_last_label = max(stencil_ir.blocks.keys())", "in range(the_array.ndim): index_var_name = ir_utils.get_unused_var_name(\"index\" + str(i), name_var_table) index_vars +=", "func is not None: return wrapper(func) return wrapper def _stencil(mode,", "for each dimension, which could be either int # or", "because copy propagation applied to the copied IR will change", "created stencil function. 
from numba.core import compiler stencil_ir = compiler.run_frontend(stencil_func)", "[stmt_index_var_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc),", "= (sig, result, typemap, calltypes) return sig def copy_ir_with_calltypes(self, ir,", "self._typingctx.can_convert(cval_ty, return_type.dtype): msg = \"cval type does not match stencil", "wrapper = _stencil(mode, options) if func is not None: return", "None and len(self.neighborhood) != args[0].ndim): raise ValueError(\"{} dimensional neighborhood specified", "if the original statement is in the original # calltypes", "compiler kernel_ir = compiler.run_frontend(func) return StencilFunc(kernel_ir, mode, options) return decorated", "renaming of the stencil kernel IR to prevent # conflicts", "return (real_ret, typemap, calltypes) def _install_type(self, typingctx): \"\"\"Constructs and installs", "entry in the array. # So, take the minimum of", "cval is set then use it if \"cval\" in self.options:", "copied IR will change the calltypes and make subsequent uses", "minimum of 0 and the minimum index found in the", "copy so they are guaranteed unique # and don't conflict", "i in range(the_array.ndim): for j in range(offset): func_text += \"", "s_index_var, stmt.value, loc) new_body.append(si) else: new_body.append(stmt) block.body = new_body return", "s_index_name, loc) const_index_vars = [] ind_stencils = [] stmt_index_var_typ =", "the array using the tuple index. si = ir.SetItem(rvar, s_index_var,", "newly created stencil function. 
from numba.core import compiler stencil_ir =", "here because the stencil might take # multiple input arrays", "['setitem', 'static_setitem'] and stmt.value.value.name in kernel.arg_names) or (isinstance(stmt, ir.SetItem) and", "neighborhood specified \" \"for %d dimensional input array\" % (len(self.neighborhood),", "'out' in kwtys: argtys_extra += (kwtys['out'],) sig_extra += \", out=None\"", "typemap, calltypes) return sig def copy_ir_with_calltypes(self, ir, calltypes): \"\"\" Create", "special type to hold stencil information for the IR. \"\"\"", "'out' in kwargs: result = kwargs['out'] rdtype = result.dtype rttype", "5) Compile the combined stencil function IR + stencil kernel", "of a loop, for which args aren't needed. ir_utils.remove_args(kernel_copy.blocks) first_arg", "get_return_type(self, argtys): if config.DEBUG_ARRAY_OPT >= 1: print(\"get_return_type\", argtys) ir_utils.dump_blocks(self.kernel_ir.blocks) if", "calltypes and make subsequent uses of the original IR invalid.", "type does not match stencil return type.\" raise NumbaValueError(msg) out_init", "copy of the calltypes because copy propagation applied to the", "block.body: if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Const)): if config.DEBUG_ARRAY_OPT >=", "# Get the type of this particular part of the", "# expression and add the corresponding index variable # to", "id_counter = 0 def __init__(self, kernel_ir, mode, options): self.id =", "adding the relative offset in stmt.value.index to # the current", "index_var = ir.Var(scope, index_names[dim], loc) index_vars += [index_var] tmpname =", "not None: sig_extra += \", {}=None\".format(out_name) if \"neighborhood\" in dict(self.kws):", "first_arg: func_text += \",\" + other_array func_text += \")\\n\" #", "need_to_calc_kernel = False if len(neighborhood) != ndim: raise ValueError(\"%d dimensional", "ashape[i] > argshape[i]: raise ValueError(\"Secondary stencil array has some dimension", "+= \" \" # Put a sentinel in the code", 
"return_type, typemap, calltypes, *args): # Overall approach: # 1) Construct", "offset in stmt.value.index to # the current absolute location in", "[one_index_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) else: acc_call = ir.Expr.binop(operator.add,", "a list of the standard indexed array names. standard_indexed =", "first if argtys_extra in self._type_cache: (_sig, _, _, _) =", "# Single dimension always has index variable 'index0'. # tmpvar", "has index variable 'index0'. # tmpvar will hold the real", "# arrays are of different size than the first input", "the generated function for each # dimension in the input", "Compile the combined stencil function IR + stencil kernel IR", "out_name, loc) ivar = ir.Var(scope, index_vars[0], loc) new_body.append(ir.SetItem(rvar, ivar, stmt.value,", "in blocks.items(): scope = block.scope loc = block.loc new_body =", "loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc))", "required\") index_len = len(index) elif isinstance(index, int): neighborhood[0][0] = min(neighborhood[0][0],", "in the i'th dimension # but minimum's greater than 0", "input typing context. \"\"\" _ty_cls = type('StencilFuncTyping_' + str(self.id), (AbstractTemplate,),", "= stmt.value.value if ((isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op", "int): neighborhood[0][0] = min(neighborhood[0][0], index) neighborhood[0][1] = max(neighborhood[0][1], index) index_len", "to the # sentinel but the new block maintains the", "approach: # 1) Construct a string containing a function definition", "+= \" raise_if_incompatible_array_sizes(\" + first_arg for other_array in relatively_indexed: if", "# We found the sentinel assignment. 
loc = inst.loc scope", "[] sum_results = [] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope,", "ir_utils.get_unused_var_name(\"full_shape\", name_var_table) func_text += \" {} = {}.shape\\n\".format(shape_name, first_arg) #", "index_var = ir.Var(scope, index_names[0], loc) tmpname = ir_utils.mk_unique_var(\"stencil_index\") tmpvar =", "signature, infer_global, AbstractTemplate) from numba.core.imputils import lower_builtin from numba.core.extending import", "ir.Var(scope, s_index_name, loc) # Build a tuple from the index", "self._type_cache = {} self._lower_me = StencilFuncLowerer(self) def replace_return_with_setitem(self, blocks, index_vars,", "dim in range(ndim): tmpname = ir_utils.mk_unique_var(\"const_index\") tmpvar = ir.Var(scope, tmpname,", "if (self.neighborhood is not None and len(self.neighborhood) != argtys[0].ndim): raise", "Get a list of the standard indexed array names. standard_indexed", "!= first_arg: func_text += \",\" + other_array func_text += \")\\n\"", "SetItem call of the value \"returned\" by the kernel into", "sentinel but the new block maintains the current block #", "the result array. Returns the block labels that contained return", "in kernel.arg_names and stmt.value.value.name not in standard_indexed): # We found", "kernel index is not constant,\" \"'neighborhood' option required\") index_len =", "kernel IR to prevent # conflicts with the stencil function", "the accesses # in the kernel from relative to regular", "% (len(self.neighborhood), argtys[0].ndim)) argtys_extra = argtys sig_extra = \"\" result", "# stencil kernel body. func_text += \"{} = 0\\n\".format(sentinel_name) func_text", "1 else: raise NumbaValueError( \"Non-tuple or non-integer used as stencil", "import typed_passes typemap, return_type, calltypes, _ = typed_passes.type_inference_stage( self._typingctx, self._targetctx,", "are not compatible # (e.g. 
values as float[:] and flags", "+= 1 self.kernel_ir = kernel_ir self.mode = mode self.options =", "sf): self.stencilFunc = sf def __call__(self, context, builder, sig, args):", "# arrays. kernel_size, relatively_indexed = self.add_indices_to_kernel( kernel_copy, index_vars, the_array.ndim, self.neighborhood,", "in [\"cval\", \"standard_indexing\", \"neighborhood\"]: raise ValueError(\"Unknown stencil option \" +", "import (CallableTemplate, signature, infer_global, AbstractTemplate) from numba.core.imputils import lower_builtin from", "a Numba function to execute this stencil and here #", "array. if result is None: return_type_name = numpy_support.as_dtype( return_type.dtype).type.__name__ if", "not np.isfinite(cval): # See if this is a string-repr numerical", "first input array. if len(relatively_indexed) > 1: func_text += \"", "the first stencil input.\") argshape = arg.shape for i in", "ranges[i][1] is the maximum of 0 and the observed maximum", "# statement to the calltypes copy. scopy = copy.deepcopy(stmt) new_block.body.append(scopy)", "new block maintains the current block # label. prev_block =", "str): mode = 'constant' # default style func = func_or_mode", "along with its calltype information. We need a copy of", "in ret_blocks: stencil_ir.blocks[ret_block].append( ir.Jump(new_label, loc)) break else: continue break stencil_ir.blocks", "return statements. 
\"\"\" ret_blocks = [] for label, block in", "if other_array != first_arg: func_text += \",\" + other_array func_text", "neighborhood_name, shape_name] + kernel_copy.arg_names + index_vars) for name, var in", "ValueError(\"Unexpected static_getitem in add_indices_to_kernel.\") relatively_indexed.add(stmt.value.value.name) # Store the index used", "= self.options[\"cval\"] cval_ty = typing.typeof.typeof(cval) if not self._typingctx.can_convert(cval_ty, return_type.dtype): msg", "+= [tuple_table[stmt_index_var.name]] elif stmt_index_var.name in const_dict: kernel_consts += [const_dict[stmt_index_var.name]] else:", "kws arguments # stencils only supported for CPU context currently", "raise ValueError(\"%d dimensional neighborhood specified for %d \" \\ \"dimensional", "by finding the maximum absolute value # index used in", "_) = self._type_cache[argtys_extra] return _sig (real_ret, typemap, calltypes) = self.get_return_type(argtys)", "\"np.nan\" elif np.isinf(cval): if cval < 0: return \"-np.inf\" else:", "block... for stmt in ir.blocks[block_label].body: # Copy the statement to", "if cval < 0: return \"-np.inf\" else: return \"np.inf\" else:", "fail) for arg in literal_unroll(args): if a.ndim != arg.ndim: raise", "elif np.isinf(cval): if cval < 0: return \"-np.inf\" else: return", "typing class for a StencilFunc object in the input typing", "the typing class built by StencilFunc._install_type(). Return the call-site signature.", "if ((isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op in ['setitem',", "calltypes[stmt] kernel_copy.blocks[block_label] = new_block return (kernel_copy, copy_calltypes) def _stencil_wrapper(self, result,", "to account for addition of the offset. 
ret_blocks = [x", "= self.options[\"cval\"] if return_type.dtype != typing.typeof.typeof(cval): msg = \"cval type", "= numba.njit(slice_addition) sa_func_typ = types.functions.Dispatcher(sa_func) typemap[sa_var.name] = sa_func_typ g_sa =", "# Build a tuple from the index ir.Var's. tuple_call =", "copy_calltypes = {} kernel_copy = ir.copy() kernel_copy.blocks = {} #", "stencil in Python mode to add the loop index to", "be a negative number (potentially -0). Then, we do #", "kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes) if \"out\" in name_var_table: raise", "not cause us to # preclude any entry in the", "kernel_copy.blocks, stencil_stub_last_label) new_label = max(kernel_copy.blocks.keys()) + 1 # Adjust ret_blocks", "array_types if config.DEBUG_ARRAY_OPT >= 1: print(\"__call__\", array_types, args, kwargs) (real_ret,", "else: acc_call = ir.Expr.binop(operator.add, getitemvar, index_vars[dim], loc) new_body.append(ir.Assign(acc_call, tmpvar, loc))", "types have to match. # An example failing signature without", "stmt.value, loc)) else: # Convert the string names of the", "the unique name of this function. stencil_func_name = \"__numba_stencil_%s_%s\" %", "x in args]) array_types_full = array_types if config.DEBUG_ARRAY_OPT >= 1:", "types, typing, utils, ir, config, ir_utils, registry from numba.core.typing.templates import", "range(offset): func_text += \" \" # ranges[i][0] is the minimum", "So, in effect array[-1] becomes array[index0-1]. \"\"\" const_dict = {}", "out_init = \"{}[:] = {}\\n\".format(out_name, cval_as_str(cval)) func_text += \" \"", "loc) stmt_index_var_typ = typemap[stmt_index_var.name] # If the array is indexed", "the loop below. 
Without literal_unroll, their # types have to", "copy.deepcopy(ir.blocks[block_label]) new_block.body = [] # For each statement in each", "ir_utils.mk_unique_var(name) ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict) stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1 # Shift", "and the observed maximum index # in this dimension because", "or np.zeros if they didn't to allocate the array. if", "sentinel in the code so we can locate it in", "use the # computed stencil kernel size so as not", "Write the return statements original value into # the array", "us to # preclude any entry in the array from", "stmt.value.value.name in kernel.arg_names and stmt.value.value.name not in standard_indexed): # We", "context.call_internal(builder, cres.fndesc, sig, args) context.add_linking_libs([cres.library]) return res @register_jitable def raise_if_incompatible_array_sizes(a,", "return_type_name) func_text += \" \" + out_init else: # result", "_ = typed_passes.type_inference_stage( self._typingctx, self._targetctx, self.kernel_ir, argtys, None, {}) if", "in ret_blocks] if config.DEBUG_ARRAY_OPT >= 1: print(\"ret_blocks w/ offsets\", ret_blocks,", "first argument to a stencil kernel must \" \"be the", "the_slice.stop + addend) class StencilFunc(object): \"\"\" A special type to", "the stencil function IR # after label and variable renaming", "not match stencil return type.\" raise NumbaValueError(msg) out_init =\"{} =", "kwargs: result = kwargs['out'] rdtype = result.dtype rttype = numpy_support.from_dtype(rdtype)", "name_var_table) if config.DEBUG_ARRAY_OPT >= 1: print(\"name_var_table\", name_var_table, sentinel_name) the_array =", "(block_label, block) in ir.blocks.items(): new_block = copy.deepcopy(ir.blocks[block_label]) new_block.body = []", "for statements after sentinel. 
block.body = block.body[i + 1:] #", "stencil_stub_last_label for x in ret_blocks] if config.DEBUG_ARRAY_OPT >= 1: print(\"ret_blocks", "elif isinstance(index, int): neighborhood[0][0] = min(neighborhood[0][0], index) neighborhood[0][1] = max(neighborhood[0][1],", "to allocate the output array (the out argument was not", "in literal_unroll(args): if a.ndim != arg.ndim: raise ValueError(\"Secondary stencil array", "the copied IR will change the calltypes and make subsequent", "#7286 if np.isnan(cval): return \"np.nan\" elif np.isinf(cval): if cval <", "result array is passed (_, result, typemap, calltypes) = self._type_cache[argtys]", "= [] if config.DEBUG_ARRAY_OPT >= 1: print(\"add_indices_to_kernel\", ndim, neighborhood) ir_utils.dump_blocks(kernel.blocks)", "kernel by finding the maximum absolute value # index used", "the input array. if stmt.value.op == 'getitem': stmt_index_var = stmt.value.index", "+ str(self.id), (AbstractTemplate,), dict(key=self, generic=self._type_me)) typingctx.insert_user_function(self, _ty_cls) def compile_for_argtys(self, argtys,", "config.DEBUG_ARRAY_OPT >= 1: print(\"add_indices_to_kernel\", ndim, neighborhood) ir_utils.dump_blocks(kernel.blocks) if neighborhood is", "A new block is allocated for the statements prior to", "mode != 'constant': raise ValueError(\"Unsupported mode style \" + mode)", "relatively_indexed: if other_array != first_arg: func_text += \",\" + other_array", "from the input array. if stmt.value.op == 'getitem': stmt_index_var =", "kwtys, return_type, sigret): # look in the type cache to", "args] + [result_type]) else: result = None array_types = tuple([typing.typeof.typeof(x)", "do # unary - on that to get the positive", "reconstitute as a tuple that can # index the array.", "isinstance(stmt_index_var_typ, types.ConstSized): one_index_typ = stmt_index_var_typ[dim] else: one_index_typ = stmt_index_var_typ[:] #", "stencil function IR. # 5) Compile the combined stencil function", "loop # body in it. 
ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) new_func = compiler.compile_ir( self._typingctx,", "[] if config.DEBUG_ARRAY_OPT >= 1: print(\"add_indices_to_kernel\", ndim, neighborhood) ir_utils.dump_blocks(kernel.blocks) if", "for each dimension. index0, index1, ... index_vars = [] for", "# (e.g. values as float[:] and flags as bool[:]) #", "jumps to the minimum labelled block of # the parfor", "and flags as bool[:]) # When more than three total", "function IR. # 5) Compile the combined stencil function IR", "var_table.items(): if not name in reserved_names: new_var_dict[name] = ir_utils.mk_unique_var(name) ir_utils.replace_var_names(stencil_ir.blocks,", "func_or_mode else: mode = func_or_mode func = None for option", "sentinel_name) the_array = args[0] if config.DEBUG_ARRAY_OPT >= 1: print(\"_stencil_wrapper\", return_type,", "mode) def decorated(func): from numba.core import compiler kernel_ir = compiler.run_frontend(func)", "kernel_copy.blocks.items(): stencil_ir.blocks[l] = b stencil_ir.blocks[new_label] = block stencil_ir.blocks[label] = prev_block", "def stencil(func_or_mode='constant', **options): # called on function without specifying mode", "kernel becomes a setitem for that # particular point in", "1: rvar = ir.Var(scope, out_name, loc) ivar = ir.Var(scope, index_vars[0],", "return statements original value into # the array using the", "# result is present, if cval is set then use", "this sentinel assignment and replace it with the IR for", "stencil kernel IR into the stencil function IR # after", "statements after the sentinel. 
for ret_block in ret_blocks: stencil_ir.blocks[ret_block].append( ir.Jump(new_label,", "return wrapper(func) return wrapper def _stencil(mode, options): if mode !=", "maximum of 0 and the observed maximum index # in", "= inst.loc scope = block.scope # split block across __sentinel__", "= sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) else: acc_call", "import numpy_support class StencilFuncLowerer(object): '''Callable class responsible for lowering calls", "getitemcall = ir.Expr.getitem(stmt_index_var, const_index_vars[dim], loc) new_body.append(ir.Assign(getitemcall, getitemvar, loc)) # Get", "= new_block return (kernel_copy, copy_calltypes) def _stencil_wrapper(self, result, sigret, return_type,", "'constant' # default style func = func_or_mode else: mode =", "stencil_ir.blocks.items(): for i, inst in enumerate(block.body): if (isinstance( inst, ir.Assign)", "in globals(), locals() stencil_func = eval(stencil_func_name) if sigret is not", "raise ValueError(\"Assignments to arrays passed to stencil \" \\ \"kernels", "stencil kernel and a list of the relatively indexed #", "the stencil kernel becomes a setitem for that # particular", "statement is in the original # calltypes then add the", "the kernel # and this will be a negative number", "argument to a stencil kernel must \" \"use relative indexing,", "take the minimum of 0 and the minimum index found", "new function is a special sentinel # assignment. # 3)", "'neighborhood' in kwtys: argtys_extra += (kwtys['neighborhood'],) sig_extra += \", neighborhood=None\"", "IR into existence. # Copy the kernel so that our", "!= typing.typeof.typeof(cval): msg = \"cval type does not match stencil", "argtys_extra += (kwtys['out'],) sig_extra += \", out=None\" result = kwtys['out']", "options): if mode != 'constant': raise ValueError(\"Unsupported mode style \"", "changes for this callsite # won't effect other callsites. 
(kernel_copy,", "each # dimension in the input array. Here we create", "stencil return type.\" raise NumbaValueError(msg) out_init =\"{} = np.full({}, {},", "sentinel stencil_ir\") ir_utils.dump_blocks(stencil_ir.blocks) print(\"before replace sentinel kernel_copy\") ir_utils.dump_blocks(kernel_copy.blocks) # Search", "if \"neighborhood\" in dict(self.kws): sig_extra += \", {}=None\".format(neighborhood_name) # Get", "var_table = ir_utils.get_name_var_table(stencil_ir.blocks) new_var_dict = {} reserved_names = ([sentinel_name, out_name,", "stencil kernel to the block # containing statements after the", "= [] sum_results = [] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var =", "call to # slice_addition. if isinstance(stmt_index_var_typ, types.misc.SliceType): sa_var = ir.Var(scope,", "value with a call to # slice_addition. if isinstance(one_index_typ, types.misc.SliceType):", "would not cause us to # preclude any entry in", "\" return {}\\n\".format(out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"new stencil func", "for {} \" \"dimensional input array\".format( len(self.neighborhood), args[0].ndim)) if 'out'", "= ([sentinel_name, out_name, neighborhood_name, shape_name] + kernel_copy.arg_names + index_vars) for", "in the original # calltypes then add the type associated", "will # remove this sentinel assignment and replace it with", "option # or np.zeros if they didn't to allocate the", "option required\") index_len = len(index) elif isinstance(index, int): neighborhood[0][0] =", "\"\"\" const_dict = {} kernel_consts = [] if config.DEBUG_ARRAY_OPT >=", "new function. for i in range(the_array.ndim): for j in range(offset):", "IR. \"\"\" id_counter = 0 def __init__(self, kernel_ir, mode, options):", "string names of the index variables into # ir.Var's. 
var_index_vars", "= 0\\n\".format(sentinel_name) func_text += \" return {}\\n\".format(out_name) if config.DEBUG_ARRAY_OPT >=", "ir_utils.get_unused_var_name(\"neighborhood\", name_var_table) sig_extra = \"\" if result is not None:", "compute elements where # elements outside the bounds of the", "def replace_return_with_setitem(self, blocks, index_vars, out_name): \"\"\" Find return statements in", "compiler stencil_ir = compiler.run_frontend(stencil_func) ir_utils.remove_dels(stencil_ir.blocks) # rename all variables in", "array_types_full = array_types if config.DEBUG_ARRAY_OPT >= 1: print(\"__call__\", array_types, args,", "\"for %d dimensional input array\" % (len(self.neighborhood), argtys[0].ndim)) argtys_extra =", "expression and add the corresponding index variable # to them", "loc) # Write the return statements original value into #", "stencil kernel size so as not to try to compute", "max(kernel_copy.blocks.keys()) + 1 # Adjust ret_blocks to account for addition", "if isinstance(te, ir.Var) and te.name in const_dict: te = const_dict[te.name]", "the IR to transition the accesses # in the kernel", "the kernel into the result array. Returns the block labels", "kernel # and this will be a negative number (potentially", "kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed, typemap, copy_calltypes) if self.neighborhood is", "numba.core.extending import register_jitable from numba.core.errors import NumbaValueError from numba.misc.special import", "new function to execute the stencil kernel. func_text = \"def", "# rename all variables in stencil_ir afresh var_table = ir_utils.get_name_var_table(stencil_ir.blocks)", "# label. 
prev_block = ir.Block(scope, loc) prev_block.body = block.body[:i] #", "tmpvar will hold the real index and is computed by", "stencil and here # create the unique name of this", "assert(isinstance(the_array, types.Type)) array_types = args new_stencil_param_types = list(array_types) if config.DEBUG_ARRAY_OPT", "if argtys_extra in self._type_cache: (_sig, _, _, _) = self._type_cache[argtys_extra]", "= new_body return ret_blocks def add_indices_to_kernel(self, kernel, index_names, ndim, neighborhood,", "for this callsite # won't effect other callsites. (kernel_copy, copy_calltypes)", "is the minimum index used in the i'th dimension #", "computed by # adding the relative offset in stmt.value.index to", "self.neighborhood is None: self.neighborhood = kernel_size if config.DEBUG_ARRAY_OPT >= 1:", "stmt.target, loc)) else: index_vars = [] sum_results = [] s_index_name", "new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc)", "(\"def __numba_dummy_stencil({}{}):\\n pass\\n\".format( \",\".join(self.kernel_ir.arg_names), sig_extra)) exec(dummy_text) in globals(), locals() dummy_func", "We need a copy of the calltypes because copy propagation", "for i in range(the_array.ndim): index_var_name = ir_utils.get_unused_var_name(\"index\" + str(i), name_var_table)", "computed size of the stencil kernel and a list of", "# but minimum's greater than 0 don't preclude any entry", "new block is allocated for the statements prior to the", "import numpy as np from llvmlite import ir as lir", "func_text += \" return {}\\n\".format(out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"new", "def decorated(func): from numba.core import compiler kernel_ir = compiler.run_frontend(func) return", "# Remember consts for use later. const_dict[stmt.target.name] = stmt.value.value if", "the stencil function # that will execute the stencil kernel.", "# function's IR. 
for (l, b) in kernel_copy.blocks.items(): stencil_ir.blocks[l] =", "sig_extra) # Get loop ranges for each dimension, which could", "array_types = tuple([typing.typeof.typeof(x) for x in args]) array_types_full = tuple([typing.typeof.typeof(x)", "block in kernel.blocks.values(): scope = block.scope loc = block.loc new_body", "argtys_extra)]) self._type_cache[argtys_extra] = (sig, result, typemap, calltypes) return sig def", "that will raise an error if any of the relatively", "we'll use the extra neighborhood # argument to the function.", "str(self.id), (AbstractTemplate,), dict(key=self, generic=self._type_me)) typingctx.insert_user_function(self, _ty_cls) def compile_for_argtys(self, argtys, kwtys,", "if \"cval\" in self.options: cval = self.options[\"cval\"] if return_type.dtype !=", "all the parfor loop body blocks to the gufunc #", "neighborhood[0][1] = max(neighborhood[0][1], index) index_len = 1 else: raise NumbaValueError(", "change the calltypes and make subsequent uses of the original", "\",\".join(kernel_copy.arg_names), sig_extra) # Get loop ranges for each dimension, which", "= _stencil(mode, options) if func is not None: return wrapper(func)", "out_name) if config.DEBUG_ARRAY_OPT >= 1: print(\"After replace_return_with_setitem\", ret_blocks) ir_utils.dump_blocks(kernel_copy.blocks) #", "specified \" \"for %d dimensional input array\" % (len(self.neighborhood), argtys[0].ndim))", "a cval stencil decorator option # or np.zeros if they", "from llvmlite import ir as lir from numba.core import types,", "-0). Then, we do # unary - on that to", "indexing # expression and add the corresponding index variable #", "literal_unroll, their # types have to match. # An example", "ir.Expr) and stmt.value.op in ['getitem', 'static_getitem'] and stmt.value.value.name in kernel.arg_names", "NumbaValueError(\"Stencil kernel with no accesses to \" \"relatively indexed arrays.\")", "if they didn't to allocate the array. 
if result is", "Transforms the stencil kernel as specified by the user into", "\" raise_if_incompatible_array_sizes(\" + first_arg for other_array in relatively_indexed: if other_array", "print(\"ret_blocks w/ offsets\", ret_blocks, stencil_stub_last_label) print(\"before replace sentinel stencil_ir\") ir_utils.dump_blocks(stencil_ir.blocks)", "compiler.compile_ir( self._typingctx, self._targetctx, stencil_ir, new_stencil_param_types, None, compiler.DEFAULT_FLAGS, {}) return new_func", "arguments # stencils only supported for CPU context currently self._typingctx", "isinstance(stmt_index_var_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func = numba.njit(slice_addition)", "= None for option in options: if option not in", "function definition includes a # unique stencil function name, the", "# Put a sentinel in the code so we can", "in stencil_ir.blocks.items(): for i, inst in enumerate(block.body): if (isinstance( inst,", "= StencilFuncLowerer(self) def replace_return_with_setitem(self, blocks, index_vars, out_name): \"\"\" Find return", "# If we have to allocate the output array (the", "value with a call to # slice_addition. if isinstance(stmt_index_var_typ, types.misc.SliceType):", "original kws arguments # stencils only supported for CPU context", "as the first stencil input.\") argshape = arg.shape for i", "types.ConstSized): one_index_typ = stmt_index_var_typ[dim] else: one_index_typ = stmt_index_var_typ[:] # If", "the combined stencil function with the replaced loop # body", "Implement AbstractTemplate.generic() for the typing class built by StencilFunc._install_type(). Return", "scopy = copy.deepcopy(stmt) new_block.body.append(scopy) if stmt in calltypes: copy_calltypes[scopy] =", "kernel.blocks.values(): scope = block.scope loc = block.loc new_body = []", "below. Without literal_unroll, their # types have to match. 
#", "elif stmt_index_var.name in const_dict: kernel_consts += [const_dict[stmt_index_var.name]] else: raise NumbaValueError(\"stencil", "then avoid the tuple construction. if len(index_vars) == 1: rvar", "prev_block # Add a jump from all the blocks that", "\"cval\" in self.options: cval = self.options[\"cval\"] cval_ty = typing.typeof.typeof(cval) if", "indexed array names. standard_indexed = self.options.get(\"standard_indexing\", []) if first_arg in", "self.neighborhood = self.options.get(\"neighborhood\") self._type_cache = {} self._lower_me = StencilFuncLowerer(self) def", "wrapper(func) return wrapper def _stencil(mode, options): if mode != 'constant':", "the statements prior to the # sentinel but the new", "new_body.append(ir.Assign(acc_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else:", "return new_func def __call__(self, *args, **kwargs): if (self.neighborhood is not", "if 'neighborhood' in kwtys: argtys_extra += (kwtys['neighborhood'],) sig_extra += \",", "in the input array. Here we create the name for", "ir, calltypes): \"\"\" Create a copy of a given IR", "style \" + mode) def decorated(func): from numba.core import compiler", "= self.options.get(\"standard_indexing\", []) if first_arg in standard_indexed: raise NumbaValueError(\"The first", "(neighborhood, relatively_indexed) def get_return_type(self, argtys): if config.DEBUG_ARRAY_OPT >= 1: print(\"get_return_type\",", "(self.neighborhood is not None and len(self.neighborhood) != args[0].ndim): raise ValueError(\"{}", "func text\") print(func_text) # Force the new stencil function into", "add the index value with a call to # slice_addition.", "of dimensions as the first stencil input.\") argshape = arg.shape", "[getitemvar, index_vars[dim]], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {})", "the IR for the newly created stencil function. 
from numba.core", "for ret_block in ret_blocks: stencil_ir.blocks[ret_block].append( ir.Jump(new_label, loc)) break else: continue", "si = ir.SetItem(rvar, s_index_var, stmt.value, loc) new_body.append(si) else: new_body.append(stmt) block.body", "\",\" + other_array func_text += \")\\n\" # Get the shape", "numpy_support.as_dtype( return_type.dtype).type.__name__ if \"cval\" in self.options: cval = self.options[\"cval\"] if", "by stencil in Python mode to add the loop index", "sig def copy_ir_with_calltypes(self, ir, calltypes): \"\"\" Create a copy of", "= [] for stmt in block.body: if isinstance(stmt, ir.Return): ret_blocks.append(label)", "+ kernel_copy.arg_names + index_vars) for name, var in var_table.items(): if", "is not None: return wrapper(func) return wrapper def _stencil(mode, options):", "it. ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) new_func = compiler.compile_ir( self._typingctx, self._targetctx, stencil_ir, new_stencil_param_types, None,", "(float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail) for arg", "nest in this new function is a special sentinel #", "if result is None: return_type_name = numpy_support.as_dtype( return_type.dtype).type.__name__ if \"cval\"", "nests to the new function. for i in range(the_array.ndim): for", "self.copy_ir_with_calltypes( self.kernel_ir, calltypes) # The stencil kernel body becomes the", "ir_utils.apply_copy_propagate( kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes) if \"out\" in name_var_table:", "range(ndim): tmpname = ir_utils.mk_unique_var(\"const_index\") tmpvar = ir.Var(scope, tmpname, loc) new_body.append(ir.Assign(ir.Const(dim,", "Corporation # SPDX-License-Identifier: BSD-2-Clause # import copy import numpy as", "part of the getitem calls. 
So, in effect array[-1] becomes", "ir.Var(scope, tmpname, loc) stmt_index_var_typ = typemap[stmt_index_var.name] # If the array", "!= arg.ndim: raise ValueError(\"Secondary stencil array does not have same", "new_func.entry_point(*(args+(result,))) def stencil(func_or_mode='constant', **options): # called on function without specifying", "self, argtys_extra)]) self._type_cache[argtys_extra] = (sig, result, typemap, calltypes) return sig", "w/ offsets\", ret_blocks, stencil_stub_last_label) print(\"before replace sentinel stencil_ir\") ir_utils.dump_blocks(stencil_ir.blocks) print(\"before", "ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp],", "used after looking up the variable in # the const", "Returns the # computed size of the stencil kernel and", "new_body.append(ir.Assign(acc_call, tmpvar, loc)) tuple_call = ir.Expr.build_tuple(ind_stencils, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc))", "and te.name in const_dict: te = const_dict[te.name] if isinstance(te, int):", "option) wrapper = _stencil(mode, options) if func is not None:", "__call__(self, *args, **kwargs): if (self.neighborhood is not None and len(self.neighborhood)", "absolute location in index0. index_var = ir.Var(scope, index_names[0], loc) tmpname", "in range(-min(0,{}),\" \"{}[{}]-max(0,{})):\\n\").format( index_vars[i], ranges[i][0], shape_name, i, ranges[i][1]) offset +=", "(), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar,", "is in the original # calltypes then add the type", "is used for statements after sentinel. 
block.body = block.body[i +", "in Python mode to add the loop index to a", "a return in the stencil kernel to the block #", "te = const_dict[te.name] if isinstance(te, int): neighborhood[i][0] = min(neighborhood[i][0], te)", "import compiler kernel_ir = compiler.run_frontend(func) return StencilFunc(kernel_ir, mode, options) return", "getitemvar, index_vars[dim], loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) tuple_call = ir.Expr.build_tuple(ind_stencils, loc)", "= max(neighborhood[0][1], index) index_len = 1 else: raise NumbaValueError( \"Non-tuple", "than the first input array. if len(relatively_indexed) > 1: func_text", "Build a tuple from the index ir.Var's. tuple_call = ir.Expr.build_tuple(var_index_vars,", "to arrays passed to stencil \" \\ \"kernels is not", "stencil kernel definition.\") # Add index variables to getitems in", "# But the current block gets a new label. body_first_label", "for lowering calls to a specific StencilFunc. ''' def __init__(self,", "IR invalid. \"\"\" copy_calltypes = {} kernel_copy = ir.copy() kernel_copy.blocks", "the combined stencil function IR + stencil kernel IR into", "sentinel. for label, block in stencil_ir.blocks.items(): for i, inst in", "that will execute the stencil kernel. This function definition includes", "= 1 # Add the loop nests to the new", "(CallableTemplate, signature, infer_global, AbstractTemplate) from numba.core.imputils import lower_builtin from numba.core.extending", "propagation applied to the copied IR will change the calltypes", "result is present, if cval is set then use it", "ret_block in ret_blocks: stencil_ir.blocks[ret_block].append( ir.Jump(new_label, loc)) break else: continue break", "the sentinel. for label, block in stencil_ir.blocks.items(): for i, inst", "neighborhood # argument to the function. ranges = [] for", "not in standard_indexed): # We found a getitem from the", "execute the stencil kernel. 
func_text = \"def {}({}{}):\\n\".format(stencil_func_name, \",\".join(kernel_copy.arg_names), sig_extra)", "**kwargs): if (self.neighborhood is not None and len(self.neighborhood) != args[0].ndim):", "currently self._typingctx = registry.cpu_target.typing_context self._targetctx = registry.cpu_target.target_context self._typingctx.refresh() self._targetctx.refresh() self._install_type(self._typingctx)", "= signature(real_ret, *argtys_extra) dummy_text = (\"def __numba_dummy_stencil({}{}):\\n pass\\n\".format( \",\".join(self.kernel_ir.arg_names), sig_extra))", "stencil kernel must \" \"use relative indexing, not standard indexing.\")", "context, builder, sig, args): cres = self.stencilFunc.compile_for_argtys(sig.args, {}, sig.return_type, None)", "constant def cval_as_str(cval): if not np.isfinite(cval): # See if this", "assignment and remove the sentinel # assignment. Insert the stencil", "\"\" result = None if 'out' in kwtys: argtys_extra +=", "If 1D array then avoid the tuple construction. if len(index_vars)", "ir_utils.dump_blocks(kernel_copy.blocks) # Start to form the new function to execute", "to execute the stencil kernel. 
func_text = \"def {}({}{}):\\n\".format(stencil_func_name, \",\".join(kernel_copy.arg_names),", "are guaranteed unique # and don't conflict with any labels", "None: return new_func.entry_point(*args) else: return new_func.entry_point(*(args+(result,))) def stencil(func_or_mode='constant', **options): #", "lo = \"{}[{}][0]\".format(neighborhood_name, i) hi = \"{}[{}][1]\".format(neighborhood_name, i) ranges.append((lo, hi))", "return wrapper def _stencil(mode, options): if mode != 'constant': raise", "not in [\"cval\", \"standard_indexing\", \"neighborhood\"]: raise ValueError(\"Unknown stencil option \"", "te.name in const_dict: te = const_dict[te.name] if isinstance(te, int): neighborhood[i][0]", "indexing.\") if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0: raise NumbaValueError(\"Standard indexing", "point in the iteration space. ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks, index_vars, out_name)", "that can # index the array. for dim in range(ndim):", "= sf def __call__(self, context, builder, sig, args): cres =", "the code so we can locate it in the IR.", "will change the calltypes and make subsequent uses of the", "cval = self.options[\"cval\"] cval_ty = typing.typeof.typeof(cval) if not self._typingctx.can_convert(cval_ty, return_type.dtype):", "need_to_calc_kernel = True else: need_to_calc_kernel = False if len(neighborhood) !=", "We generate a Numba function to execute this stencil and", "add_indices_to_kernel(self, kernel, index_names, ndim, neighborhood, standard_indexed, typemap, calltypes): \"\"\" Transforms", "% (len(neighborhood), ndim)) tuple_table = ir_utils.get_tuple_table(kernel.blocks) relatively_indexed = set() for", "becomes array[index0-1]. 
\"\"\" const_dict = {} kernel_consts = [] if", "bool[:]) # When more than three total arrays are given,", "ir.blocks.items(): new_block = copy.deepcopy(ir.blocks[block_label]) new_block.body = [] # For each", "in args] + [result_type]) else: result = None array_types =", "ranges.append((lo, hi)) # If there are more than one relatively", "*argtys_extra) dummy_text = (\"def __numba_dummy_stencil({}{}):\\n pass\\n\".format( \",\".join(self.kernel_ir.arg_names), sig_extra)) exec(dummy_text) in", "the kernel so that our changes for this callsite #", "(l, b) in kernel_copy.blocks.items(): stencil_ir.blocks[l] = b stencil_ir.blocks[new_label] = block", "type.\" raise NumbaValueError(msg) out_init =\"{} = np.full({}, {}, dtype=np.{})\\n\".format( out_name,", "\\ \"dimensional input array\" % (len(neighborhood), ndim)) tuple_table = ir_utils.get_tuple_table(kernel.blocks)", "int): lo = kernel_size[i][0] hi = kernel_size[i][1] else: lo =", "\" \" # ranges[i][0] is the minimum index used in", "ivar = ir.Var(scope, index_vars[0], loc) new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc)) else:", "stencil_ir.blocks[label] = prev_block # Add a jump from all the", "multiple input arrays with different types that are not compatible", "loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))", "maintains the current block # label. prev_block = ir.Block(scope, loc)", "new_block.body = [] # For each statement in each block...", "label, block in stencil_ir.blocks.items(): for i, inst in enumerate(block.body): if", "\" \"relatively indexed arrays.\") for index in kernel_consts: if isinstance(index,", "print(\"new stencil func text\") print(func_text) # Force the new stencil", "and this will be a negative number (potentially -0). 
Then,", "= const_dict[te.name] if isinstance(te, int): neighborhood[i][0] = min(neighborhood[i][0], te) neighborhood[i][1]", "break stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks) ir_utils.remove_dels(stencil_ir.blocks) assert(isinstance(the_array, types.Type)) array_types = args", "block.body = new_body if need_to_calc_kernel: # Find the size of", "to # preclude any entry in the array from being", "= kernel_ir self.mode = mode self.options = options self.kws =", "to the copied IR will change the calltypes and make", "ir.Block(scope, loc) prev_block.body = block.body[:i] # The current block is", "raise ValueError(\"Unknown stencil option \" + option) wrapper = _stencil(mode,", "1: print(\"remembering in const_dict\", stmt.target.name, stmt.value.value) # Remember consts for", "(isinstance( inst, ir.Assign) and inst.target.name == sentinel_name): # We found", "= ir.Global(\"slice_addition\", sa_func, loc) new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var,", "= \"__numba_stencil_%s_%s\" % ( hex(id(the_array)).replace(\"-\", \"_\"), self.id) # We will", "Put a sentinel in the code so we can locate", "into existence. # Copy the kernel so that our changes", "def get_return_type(self, argtys): if config.DEBUG_ARRAY_OPT >= 1: print(\"get_return_type\", argtys) ir_utils.dump_blocks(self.kernel_ir.blocks)", "name of this function. stencil_func_name = \"__numba_stencil_%s_%s\" % ( hex(id(the_array)).replace(\"-\",", "in globals(), locals() dummy_func = eval(\"__numba_dummy_stencil\") sig = sig.replace(pysig=utils.pysignature(dummy_func)) self._targetctx.insert_func_defn([(self._lower_me,", "with the replaced loop # body in it. ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) new_func", "any labels in the stencil_ir. 
kernel_copy.blocks = ir_utils.add_offset_to_labels( kernel_copy.blocks, stencil_stub_last_label)", "(_sig, _, _, _) = self._type_cache[argtys_extra] return _sig (real_ret, typemap,", "getitemvar = ir.Var(scope, getitemname, loc) getitemcall = ir.Expr.getitem(stmt_index_var, const_index_vars[dim], loc)", "msg = \"cval type does not match stencil return type.\"", "[] s_index_name = ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc) const_index_vars", "the call-site signature. \"\"\" if (self.neighborhood is not None and", "new_func def _type_me(self, argtys, kwtys): \"\"\" Implement AbstractTemplate.generic() for the", "the extra neighborhood # argument to the function. ranges =", "ranges[i][0] is the minimum index used in the i'th dimension", "# Adjust ret_blocks to account for addition of the offset.", "_ in range(ndim)] if len(kernel_consts) == 0: raise NumbaValueError(\"Stencil kernel", "sig_extra += \", {}=None\".format(neighborhood_name) # Get a list of the", "in range(the_array.ndim): for j in range(offset): func_text += \" \"", "tmpvar = ir.Var(scope, tmpname, loc) new_body.append(ir.Assign(ir.Const(dim, loc), tmpvar, loc)) const_index_vars", "in the type cache first if argtys_extra in self._type_cache: (_sig,", "# calltypes then add the type associated with this #", "as a tuple that can # index the array. for", "typemap, calltypes, *argtys) return new_func def _type_me(self, argtys, kwtys): \"\"\"", "real_ret = types.npytypes.Array(return_type, argtys[0].ndim, argtys[0].layout) return (real_ret, typemap, calltypes) def", "becomes a setitem for that # particular point in the", "maximums would not cause us to # preclude any entry", "\" \\ \"dimensional input array\" % (len(neighborhood), ndim)) tuple_table =", "a sentinel in the code so we can locate it", "for the newly created stencil function. 
from numba.core import compiler", "no accesses to \" \"relatively indexed arrays.\") for index in", "compiler.run_frontend(stencil_func) ir_utils.remove_dels(stencil_ir.blocks) # rename all variables in stencil_ir afresh var_table", "index the array. for dim in range(ndim): tmpname = ir_utils.mk_unique_var(\"const_index\")", "inst, ir.Assign) and inst.target.name == sentinel_name): # We found the", "our changes for this callsite # won't effect other callsites.", "# Compile the combined stencil function with the replaced loop", "location in index0. index_var = ir.Var(scope, index_names[0], loc) tmpname =", "the primary input array.\") from numba.core import typed_passes typemap, return_type,", "if isinstance(stmt_index_var_typ, types.ConstSized): one_index_typ = stmt_index_var_typ[dim] else: one_index_typ = stmt_index_var_typ[:]", "a stencil kernel must \" \"use relative indexing, not standard", "in the stencil kernel becomes a setitem for that #", "ndim: raise NumbaValueError( \"Stencil index does not match array dimensionality.\")", "loop index to a user-specified slice. \"\"\" return slice(the_slice.start +", "self.options[\"cval\"] cval_ty = typing.typeof.typeof(cval) if not self._typingctx.can_convert(cval_ty, return_type.dtype): msg =", "[result_type]) else: result = None array_types = tuple([typing.typeof.typeof(x) for x", "the # stencil kernel body. func_text += \"{} = 0\\n\".format(sentinel_name)", "call of the value \"returned\" by the kernel into the", "{}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc))", "argtys_extra = argtys sig_extra = \"\" result = None if", "negative maximums would not cause us to # preclude any", "number (potentially -0). 
Then, we do # unary - on", "not None: return wrapper(func) return wrapper def _stencil(mode, options): if", "neighborhood_name = ir_utils.get_unused_var_name(\"neighborhood\", name_var_table) sig_extra = \"\" if result is", "numpy array.\") real_ret = types.npytypes.Array(return_type, argtys[0].ndim, argtys[0].layout) return (real_ret, typemap,", "self._type_cache: (_sig, _, _, _) = self._type_cache[argtys_extra] return _sig (real_ret,", "= block.body[i + 1:] # But the current block gets", "if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0: raise NumbaValueError(\"Standard indexing requested", "this is a string-repr numerical const, issue #7286 if np.isnan(cval):", "sa_func = numba.njit(slice_addition) sa_func_typ = types.functions.Dispatcher(sa_func) typemap[sa_var.name] = sa_func_typ g_sa", "found in the kernel # and this will be a", "size so as not to try to compute elements where", "tmpname, loc) stmt_index_var_typ = typemap[stmt_index_var.name] # If the array is", "for dim in range(ndim): tmpname = ir_utils.mk_unique_var(\"const_index\") tmpvar = ir.Var(scope,", "self.options.get(\"neighborhood\") self._type_cache = {} self._lower_me = StencilFuncLowerer(self) def replace_return_with_setitem(self, blocks,", "= ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc) const_index_vars = []", "[index_var_name] # Create extra signature for out and neighborhood. out_name", "of the kernel # and if the original statement is", "same number \" \" of dimensions as the first stencil", "kernel, index_names, ndim, neighborhood, standard_indexed, typemap, calltypes): \"\"\" Transforms the", "stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks) ir_utils.remove_dels(stencil_ir.blocks) assert(isinstance(the_array, types.Type)) array_types = args new_stencil_param_types", "value \"returned\" by the kernel into the result array. 
Returns", "kernels.\") sentinel_name = ir_utils.get_unused_var_name(\"__sentinel__\", name_var_table) if config.DEBUG_ARRAY_OPT >= 1: print(\"name_var_table\",", "replace them with a SetItem call of the value \"returned\"", "outside the bounds of the input array would be needed.", "size than the first input array. if len(relatively_indexed) > 1:", "to execute this stencil and here # create the unique", "if self.neighborhood is None: self.neighborhood = kernel_size if config.DEBUG_ARRAY_OPT >=", "rename all variables in stencil_ir afresh var_table = ir_utils.get_name_var_table(stencil_ir.blocks) new_var_dict", "installs a typing class for a StencilFunc object in the", "because the stencil might take # multiple input arrays with", "if stmt_index_var.name in tuple_table: kernel_consts += [tuple_table[stmt_index_var.name]] elif stmt_index_var.name in", "dict(key=self, generic=self._type_me)) typingctx.insert_user_function(self, _ty_cls) def compile_for_argtys(self, argtys, kwtys, return_type, sigret):", "in args]) array_types_full = array_types if config.DEBUG_ARRAY_OPT >= 1: print(\"__call__\",", "stmt.value.op in ['getitem', 'static_getitem'] and stmt.value.value.name in kernel.arg_names and stmt.value.value.name", "typemap[sa_var.name] = sa_func_typ g_sa = ir.Global(\"slice_addition\", sa_func, loc) new_body.append(ir.Assign(g_sa, sa_var,", "# allow static_getitem since rewrite passes are applied #raise ValueError(\"Unexpected", "# Add all the parfor loop body blocks to the", "= typemap[stmt_index_var.name] # Same idea as above but you have", "const_dict = {} kernel_consts = [] if config.DEBUG_ARRAY_OPT >= 1:", "third # are iterated over in the loop below. 
Without", "else: raise NumbaValueError( \"Non-tuple or non-integer used as stencil index.\")", "of 0 and the observed maximum index # in this", "kernel.arg_names) or (isinstance(stmt, ir.SetItem) and stmt.target.name in kernel.arg_names)): raise ValueError(\"Assignments", "= typing.typeof.typeof(cval) if not self._typingctx.can_convert(cval_ty, return_type.dtype): msg = \"cval type", "(c) 2017 Intel Corporation # SPDX-License-Identifier: BSD-2-Clause # import copy", "> 1: func_text += \" raise_if_incompatible_array_sizes(\" + first_arg for other_array", "typemap[stmt_index_var.name] # If the array is indexed with a slice", "return \"np.inf\" else: return str(cval) # If we have to", "that previously contained # a return in the stencil kernel", "numpy_support.from_dtype(rdtype) result_type = types.npytypes.Array(rttype, result.ndim, numpy_support.map_layout(result)) array_types = tuple([typing.typeof.typeof(x) for", "whose # use is precluded. # ranges[i][1] is the maximum", "func_text += (\"for {} in range(-min(0,{}),\" \"{}[{}]-max(0,{})):\\n\").format( index_vars[i], ranges[i][0], shape_name,", "new_body.append(stmt) block.body = new_body if need_to_calc_kernel: # Find the size", "+ mode) def decorated(func): from numba.core import compiler kernel_ir =", "SPDX-License-Identifier: BSD-2-Clause # import copy import numpy as np from", "loc), stmt.target, loc)) else: acc_call = ir.Expr.binop(operator.add, stmt_index_var, index_var, loc)", "case we'll use the extra neighborhood # argument to the", "index0, index1, ... index_vars = [] for i in range(the_array.ndim):", "index found in the kernel # and this will be", "sentinel assignment and replace it with the IR for the", "if func is not None: return wrapper(func) return wrapper def", "body blocks to the gufunc # function's IR. 
for (l,", "kernel_size[i][1] else: lo = \"{}[{}][0]\".format(neighborhood_name, i) hi = \"{}[{}][1]\".format(neighborhood_name, i)", "i in range(len(ashape)): if ashape[i] > argshape[i]: raise ValueError(\"Secondary stencil", "= compiler.run_frontend(func) return StencilFunc(kernel_ir, mode, options) return decorated @lower_builtin(stencil) def", "indexed # arrays are of different size than the first", "= ir.Var(scope, tmpname, loc) new_body.append(ir.Assign(ir.Const(dim, loc), tmpvar, loc)) const_index_vars +=", "greater than 0 don't preclude any entry in the array.", "this new function is a special sentinel # assignment. #", "= ir.Var(scope, index_vars[0], loc) new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc)) else: #", "\"{}[{}][0]\".format(neighborhood_name, i) hi = \"{}[{}][1]\".format(neighborhood_name, i) ranges.append((lo, hi)) # If", "The current block is used for statements after sentinel. block.body", "but you have to extract # individual elements out of", "callsites. (kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes( self.kernel_ir, calltypes) # The stencil", "from numba.core.extending import register_jitable from numba.core.errors import NumbaValueError from numba.misc.special", "# create the unique name of this function. 
stencil_func_name =", "ir_utils.get_unused_var_name(\"index\" + str(i), name_var_table) index_vars += [index_var_name] # Create extra", "new_func = compiler.compile_ir( self._typingctx, self._targetctx, stencil_ir, new_stencil_param_types, None, compiler.DEFAULT_FLAGS, {})", "new_stencil_param_types, None, compiler.DEFAULT_FLAGS, {}) return new_func def __call__(self, *args, **kwargs):", "cval = self.options[\"cval\"] if return_type.dtype != typing.typeof.typeof(cval): msg = \"cval", "ir.Global(\"slice_addition\", sa_func, loc) new_body.append(ir.Assign(g_sa, sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var,", "[] for i in range(the_array.ndim): index_var_name = ir_utils.get_unused_var_name(\"index\" + str(i),", "passes are applied #raise ValueError(\"Unexpected static_getitem in add_indices_to_kernel.\") relatively_indexed.add(stmt.value.value.name) #", "each block... for stmt in ir.blocks[block_label].body: # Copy the statement", "config.DEBUG_ARRAY_OPT >= 1: print(\"__call__\", array_types, args, kwargs) (real_ret, typemap, calltypes)", "supported for CPU context currently self._typingctx = registry.cpu_target.typing_context self._targetctx =", "dimensional input array\" % (len(self.neighborhood), argtys[0].ndim)) argtys_extra = argtys sig_extra", "# won't effect other callsites. (kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes( self.kernel_ir,", "out_name = ir_utils.get_unused_var_name(\"out\", name_var_table) neighborhood_name = ir_utils.get_unused_var_name(\"neighborhood\", name_var_table) sig_extra =", "was not used) # then us numpy.full if the user", "print(\"before replace sentinel stencil_ir\") ir_utils.dump_blocks(stencil_ir.blocks) print(\"before replace sentinel kernel_copy\") ir_utils.dump_blocks(kernel_copy.blocks)", "= tuple([typing.typeof.typeof(x) for x in args]) array_types_full = array_types if", "the new block maintains the current block # label. 
prev_block", "loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,s_index_var,loc), stmt.target,loc)) else: new_body.append(stmt) block.body", "1D array then avoid the tuple construction. if len(index_vars) ==", "need_to_calc_kernel: assert hasattr(stmt_index_var, 'name') if stmt_index_var.name in tuple_table: kernel_consts +=", "Add index variables to getitems in the IR to transition", "a list of the relatively indexed # arrays. kernel_size, relatively_indexed", "new function. # 4) Split the block containing the sentinel", "arrays. kernel_size, relatively_indexed = self.add_indices_to_kernel( kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed,", "stencil_ir. kernel_copy.blocks = ir_utils.add_offset_to_labels( kernel_copy.blocks, stencil_stub_last_label) new_label = max(kernel_copy.blocks.keys()) +", "and isinstance(stmt.value, ir.Expr) and stmt.value.op in ['getitem', 'static_getitem'] and stmt.value.value.name", "import numba import operator from numba.np import numpy_support class StencilFuncLowerer(object):", "stencils only supported for CPU context currently self._typingctx = registry.cpu_target.typing_context", "= calltypes[stmt] kernel_copy.blocks[block_label] = new_block return (kernel_copy, copy_calltypes) def _stencil_wrapper(self,", "print(\"After add_indices_to_kernel\") ir_utils.dump_blocks(kernel_copy.blocks) # The return in the stencil kernel", ">= 1: print(\"name_var_table\", name_var_table, sentinel_name) the_array = args[0] if config.DEBUG_ARRAY_OPT", "they are guaranteed unique # and don't conflict with any", "Overall approach: # 1) Construct a string containing a function", "here # create the unique name of this function. 
stencil_func_name", "\", {}=None\".format(neighborhood_name) # Get a list of the standard indexed", "shape_name, i, ranges[i][1]) offset += 1 for j in range(offset):", "# default style func = func_or_mode else: mode = func_or_mode", "# the array using the tuple index. si = ir.SetItem(rvar,", "numba.np import numpy_support class StencilFuncLowerer(object): '''Callable class responsible for lowering", "ir.Expr.build_tuple(ind_stencils, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,s_index_var,loc), stmt.target,loc)) else: new_body.append(stmt)", "in name_var_table: raise NumbaValueError(\"Cannot use the reserved word 'out' in", "and stmt.target.name in kernel.arg_names)): raise ValueError(\"Assignments to arrays passed to", "reserved_names: new_var_dict[name] = ir_utils.mk_unique_var(name) ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict) stencil_stub_last_label = max(stencil_ir.blocks.keys()) +", "calltype information. We need a copy of the calltypes because", "= copy.deepcopy(ir.blocks[block_label]) new_block.body = [] # For each statement in", "be # (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail)", "offsets\", ret_blocks, stencil_stub_last_label) print(\"before replace sentinel stencil_ir\") ir_utils.dump_blocks(stencil_ir.blocks) print(\"before replace", "if result is not None: sig_extra += \", {}=None\".format(out_name) if", "the sentinel # assignment. Insert the stencil kernel IR into", "**options): # called on function without specifying mode style if", "literal_unroll here because the stencil might take # multiple input", "array_types_full = tuple([typing.typeof.typeof(x) for x in args] + [result_type]) else:", "to getitems in the IR to transition the accesses #", "error if any of the relatively indexed # arrays are", "a StencilFunc object in the input typing context. 
\"\"\" _ty_cls", "= ir.Expr.binop(operator.add, getitemvar, index_vars[dim], loc) new_body.append(ir.Assign(acc_call, tmpvar, loc)) tuple_call =", "raise_if_incompatible_array_sizes(\" + first_arg for other_array in relatively_indexed: if other_array !=", "result = kwargs['out'] rdtype = result.dtype rttype = numpy_support.from_dtype(rdtype) result_type", "+ addend, the_slice.stop + addend) class StencilFunc(object): \"\"\" A special", "{} self._lower_me = StencilFuncLowerer(self) def replace_return_with_setitem(self, blocks, index_vars, out_name): \"\"\"", "to the minimum labelled block of # the parfor body.", "in const_dict\", stmt.target.name, stmt.value.value) # Remember consts for use later.", "0 don't preclude any entry in the array. # So,", "in kernel.arg_names) or (isinstance(stmt, ir.SetItem) and stmt.target.name in kernel.arg_names)): raise", "a.ndim != arg.ndim: raise ValueError(\"Secondary stencil array does not have", "new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc)) else: # Convert the string names", "calltypes) = self.get_return_type(argtys) sig = signature(real_ret, *argtys_extra) dummy_text = (\"def", "# Start to form the new function to execute the", "function definition for the stencil function # that will execute", "above but you have to extract # individual elements out", "not None: pysig = utils.pysignature(stencil_func) sigret.pysig = pysig # Get", "const_dict[stmt.target.name] = stmt.value.value if ((isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and", "combined stencil function IR + stencil kernel IR into existence.", "block # label. prev_block = ir.Block(scope, loc) prev_block.body = block.body[:i]", "input array. Here we create the name for # the", "with a SetItem call of the value \"returned\" by the", "IR of this new function. 
# 4) Split the block", "args[0].ndim)) if 'out' in kwargs: result = kwargs['out'] rdtype =", "StencilFuncLowerer(object): '''Callable class responsible for lowering calls to a specific", "def compile_for_argtys(self, argtys, kwtys, return_type, sigret): # look in the", "because negative maximums would not cause us to # preclude", "{}, dtype=np.{})\\n\".format( out_name, shape_name, cval_as_str(cval), return_type_name) else: out_init =\"{} =", "self._typingctx, self._targetctx, stencil_ir, new_stencil_param_types, None, compiler.DEFAULT_FLAGS, {}) return new_func def", "[index_var] tmpname = ir_utils.mk_unique_var(\"ind_stencil_index\") tmpvar = ir.Var(scope, tmpname, loc) ind_stencils", "ret_blocks to account for addition of the offset. ret_blocks =", "return in the stencil kernel becomes a setitem for that", "from numba.core import compiler kernel_ir = compiler.run_frontend(func) return StencilFunc(kernel_ir, mode,", "in kernel_consts: if isinstance(index, tuple) or isinstance(index, list): for i", "isinstance(stmt.value, ir.Expr) and stmt.value.op in ['getitem', 'static_getitem'] and stmt.value.value.name in", "numerical const, issue #7286 if np.isnan(cval): return \"np.nan\" elif np.isinf(cval):", "of the offset. ret_blocks = [x + stencil_stub_last_label for x", "1: print(\"After replace_return_with_setitem\", ret_blocks) ir_utils.dump_blocks(kernel_copy.blocks) # Start to form the", "sentinel. 
for ret_block in ret_blocks: stencil_ir.blocks[ret_block].append( ir.Jump(new_label, loc)) break else:", "= ir.Expr.build_tuple(ind_stencils, loc) new_body.append(ir.Assign(tuple_call, s_index_var, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value,s_index_var,loc), stmt.target,loc)) else:", "# and don't conflict with any labels in the stencil_ir.", "numpy_support.map_layout(result)) array_types = tuple([typing.typeof.typeof(x) for x in args]) array_types_full =", "else: return new_func.entry_point(*(args+(result,))) def stencil(func_or_mode='constant', **options): # called on function", "# the index variable for each dimension. index0, index1, ...", "in the type cache to find if result array is", "unique # and don't conflict with any labels in the", "not have same number \" \" of dimensions as the", "of the input array. Those loop nests use the #", "def __init__(self, kernel_ir, mode, options): self.id = type(self).id_counter type(self).id_counter +=", "signature for out and neighborhood. out_name = ir_utils.get_unused_var_name(\"out\", name_var_table) neighborhood_name", "1 # Adjust ret_blocks to account for addition of the", "= [] ind_stencils = [] stmt_index_var_typ = typemap[stmt_index_var.name] # Same", "only supported for CPU context currently self._typingctx = registry.cpu_target.typing_context self._targetctx", "= args[0] if config.DEBUG_ARRAY_OPT >= 1: print(\"_stencil_wrapper\", return_type, return_type.dtype, type(return_type.dtype),", "rvar = ir.Var(scope, out_name, loc) ivar = ir.Var(scope, index_vars[0], loc)", "is precluded. # ranges[i][1] is the maximum of 0 and", "is computed by # adding the relative offset in stmt.value.index", "Add the loop nests to the new function. for i", "*args): # Overall approach: # 1) Construct a string containing", "+ stencil_stub_last_label for x in ret_blocks] if config.DEBUG_ARRAY_OPT >= 1:", "needed. # 2) The but of the loop nest in", "type of this particular part of the index tuple. 
if", "and isinstance(stmt.value, ir.Expr) and stmt.value.op in ['setitem', 'static_setitem'] and stmt.value.value.name", "in add_indices_to_kernel.\") relatively_indexed.add(stmt.value.value.name) # Store the index used after looking", "result.dtype rttype = numpy_support.from_dtype(rdtype) result_type = types.npytypes.Array(rttype, result.ndim, numpy_support.map_layout(result)) array_types", "with a call to # slice_addition. if isinstance(one_index_typ, types.misc.SliceType): sa_var", "slice_addition(the_slice, addend): \"\"\" Called by stencil in Python mode to", "ir_utils.mk_unique_var(\"const_index\") tmpvar = ir.Var(scope, tmpname, loc) new_body.append(ir.Assign(ir.Const(dim, loc), tmpvar, loc))", "by # adding the relative offset in stmt.value.index to #", "# Same idea as above but you have to extract", "kernel copy so they are guaranteed unique # and don't", "return new_func.entry_point(*(args+(result,))) def stencil(func_or_mode='constant', **options): # called on function without", "body becomes the body of a loop, for which args", "= ir_utils.mk_unique_var(\"getitem\") getitemvar = ir.Var(scope, getitemname, loc) getitemcall = ir.Expr.getitem(stmt_index_var,", "a call to # slice_addition. if isinstance(one_index_typ, types.misc.SliceType): sa_var =", "import copy import numpy as np from llvmlite import ir", "index_var], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call,", "ind_stencils = [] stmt_index_var_typ = typemap[stmt_index_var.name] # Same idea as", "if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op in ['getitem',", "tuple_table: kernel_consts += [tuple_table[stmt_index_var.name]] elif stmt_index_var.name in const_dict: kernel_consts +=", "from being used. func_text += (\"for {} in range(-min(0,{}),\" \"{}[{}]-max(0,{})):\\n\").format(", "indexed # arrays. 
kernel_size, relatively_indexed = self.add_indices_to_kernel( kernel_copy, index_vars, the_array.ndim,", "a function definition for the stencil function # that will", "argtys[0].layout) return (real_ret, typemap, calltypes) def _install_type(self, typingctx): \"\"\"Constructs and", "= kernel_size[i][0] hi = kernel_size[i][1] else: lo = \"{}[{}][0]\".format(neighborhood_name, i)", "scope = block.scope loc = block.loc new_body = [] for", "the tuple construction. if len(index_vars) == 1: rvar = ir.Var(scope,", "# See if this is a string-repr numerical const, issue", "str(cval) # If we have to allocate the output array", "range(len(ashape)): if ashape[i] > argshape[i]: raise ValueError(\"Secondary stencil array has", "isinstance(index, int): neighborhood[0][0] = min(neighborhood[0][0], index) neighborhood[0][1] = max(neighborhood[0][1], index)", "from all the blocks that previously contained # a return", "BSD-2-Clause # import copy import numpy as np from llvmlite", "of a given IR along with its calltype information. We", "= self._type_cache[argtys] new_func = self._stencil_wrapper(result, sigret, return_type, typemap, calltypes, *argtys)", "+ 1:] # But the current block gets a new", "[\"cval\", \"standard_indexing\", \"neighborhood\"]: raise ValueError(\"Unknown stencil option \" + option)", "lir from numba.core import types, typing, utils, ir, config, ir_utils,", "and don't conflict with any labels in the stencil_ir. kernel_copy.blocks", "arg in literal_unroll(args): if a.ndim != arg.ndim: raise ValueError(\"Secondary stencil", "name in reserved_names: new_var_dict[name] = ir_utils.mk_unique_var(name) ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict) stencil_stub_last_label =", "after the sentinel. 
for ret_block in ret_blocks: stencil_ir.blocks[ret_block].append( ir.Jump(new_label, loc))", "cres.fndesc, sig, args) context.add_linking_libs([cres.library]) return res @register_jitable def raise_if_incompatible_array_sizes(a, *args):", "this will be a negative number (potentially -0). Then, we", "specified for %d \" \\ \"dimensional input array\" % (len(neighborhood),", "kernel_copy.blocks = {} # For each block... for (block_label, block)", "# assignment. # 3) Get the IR of this new", "in self.options: cval = self.options[\"cval\"] if return_type.dtype != typing.typeof.typeof(cval): msg", "first stencil input.\") argshape = arg.shape for i in range(len(ashape)):", "and stmt.value.value.name in kernel.arg_names) or (isinstance(stmt, ir.SetItem) and stmt.target.name in", "to a stencil kernel must \" \"be the primary input", "isinstance(func_or_mode, str): mode = 'constant' # default style func =", "!= ndim: raise ValueError(\"%d dimensional neighborhood specified for %d \"", "add the corresponding index variable # to them and then", "name \" \"not present in the stencil kernel definition.\") #", "sigret.pysig = pysig # Get the IR for the newly", "builder, sig, args): cres = self.stencilFunc.compile_for_argtys(sig.args, {}, sig.return_type, None) res", "kernel body. 
func_text += \"{} = 0\\n\".format(sentinel_name) func_text += \"", "isinstance(argtys[0], types.npytypes.Array): raise NumbaValueError(\"The first argument to a stencil kernel", "= np.full({}, {}, dtype=np.{})\\n\".format( out_name, shape_name, cval_as_str(cval), return_type_name) else: out_init", "# The current block is used for statements after sentinel.", "cval_as_str(cval)) func_text += \" \" + out_init offset = 1", "[tmpvar] getitemname = ir_utils.mk_unique_var(\"getitem\") getitemvar = ir.Var(scope, getitemname, loc) getitemcall", "+= \", {}=None\".format(neighborhood_name) # Get a list of the standard", "len(self.neighborhood) != argtys[0].ndim): raise NumbaValueError(\"%d dimensional neighborhood specified \" \"for", "print(\"name_var_table\", name_var_table, sentinel_name) the_array = args[0] if config.DEBUG_ARRAY_OPT >= 1:", "each statement in each block... for stmt in ir.blocks[block_label].body: #", "import compiler stencil_ir = compiler.run_frontend(stencil_func) ir_utils.remove_dels(stencil_ir.blocks) # rename all variables", "regular Python indexing. Returns the # computed size of the", "tuple) or isinstance(index, list): for i in range(len(index)): te =", "= {} self._lower_me = StencilFuncLowerer(self) def replace_return_with_setitem(self, blocks, index_vars, out_name):", "return (neighborhood, relatively_indexed) def get_return_type(self, argtys): if config.DEBUG_ARRAY_OPT >= 1:", "i in range(len(index)): te = index[i] if isinstance(te, ir.Var) and", "options) return decorated @lower_builtin(stencil) def stencil_dummy_lower(context, builder, sig, args): \"lowering", "negative number (potentially -0). Then, we do # unary -", "[] for i in range(the_array.ndim): if isinstance(kernel_size[i][0], int): lo =", "the stencil kernel and a list of the relatively indexed", "args new_stencil_param_types = list(array_types) if config.DEBUG_ARRAY_OPT >= 1: print(\"new_stencil_param_types\", new_stencil_param_types)", "each dimension. index0, index1, ... 
index_vars = [] for i", "in relatively_indexed: if other_array != first_arg: func_text += \",\" +", "= kwargs['out'] rdtype = result.dtype rttype = numpy_support.from_dtype(rdtype) result_type =", "\"not present in the stencil kernel definition.\") # Add index", "if not isinstance(func_or_mode, str): mode = 'constant' # default style", "import literal_unroll import numba import operator from numba.np import numpy_support", "if need_to_calc_kernel: assert hasattr(stmt_index_var, 'name') if stmt_index_var.name in tuple_table: kernel_consts", "1 # Add the loop nests to the new function.", "sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {}) new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) new_body.append(ir.Assign( ir.Expr.getitem(stmt.value.value, tmpvar,", "IR for the newly created stencil function. from numba.core import", "variable 'index0'. # tmpvar will hold the real index and", "# import copy import numpy as np from llvmlite import", "neighborhood, standard_indexed, typemap, calltypes): \"\"\" Transforms the stencil kernel as", "_ty_cls = type('StencilFuncTyping_' + str(self.id), (AbstractTemplate,), dict(key=self, generic=self._type_me)) typingctx.insert_user_function(self, _ty_cls)", "Insert the stencil kernel IR into the stencil function IR", "= ir_utils.get_unused_var_name(\"full_shape\", name_var_table) func_text += \" {} = {}.shape\\n\".format(shape_name, first_arg)", "one that includes each dimension's index variable as part of", "None if 'out' in kwtys: argtys_extra += (kwtys['out'],) sig_extra +=", "label. prev_block = ir.Block(scope, loc) prev_block.body = block.body[:i] # The", "func_text += \" \" + out_init else: # result is", "ret_blocks, stencil_stub_last_label) print(\"before replace sentinel stencil_ir\") ir_utils.dump_blocks(stencil_ir.blocks) print(\"before replace sentinel", "function into existence. 
exec(func_text) in globals(), locals() stencil_func = eval(stencil_func_name)", "kernel_ir = compiler.run_frontend(func) return StencilFunc(kernel_ir, mode, options) return decorated @lower_builtin(stencil)", "look in the type cache to find if result array", "list(array_types) if config.DEBUG_ARRAY_OPT >= 1: print(\"new_stencil_param_types\", new_stencil_param_types) ir_utils.dump_blocks(stencil_ir.blocks) # Compile", "block.loc new_body = [] for stmt in block.body: if isinstance(stmt,", "''' def __init__(self, sf): self.stencilFunc = sf def __call__(self, context,", "the relatively indexed # arrays. kernel_size, relatively_indexed = self.add_indices_to_kernel( kernel_copy,", "shape_name] + kernel_copy.arg_names + index_vars) for name, var in var_table.items():", "out_init offset = 1 # Add the loop nests to", "%d dimensional input array\" % (len(self.neighborhood), argtys[0].ndim)) argtys_extra = argtys", "and third # are iterated over in the loop below.", "ir_utils.mk_unique_var(\"ind_stencil_index\") tmpvar = ir.Var(scope, tmpname, loc) ind_stencils += [tmpvar] getitemname", "else: continue break stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks) ir_utils.remove_dels(stencil_ir.blocks) assert(isinstance(the_array, types.Type)) array_types", "style func = func_or_mode else: mode = func_or_mode func =", "we have to allocate the output array (the out argument", "must \" \"be the primary input array.\") from numba.core import", "kernel_copy = ir.copy() kernel_copy.blocks = {} # For each block...", "\" \"dimensional input array\".format( len(self.neighborhood), args[0].ndim)) if 'out' in kwargs:", "array.\") real_ret = types.npytypes.Array(return_type, argtys[0].ndim, argtys[0].layout) return (real_ret, typemap, calltypes)", "specified for {} \" \"dimensional input array\".format( len(self.neighborhood), args[0].ndim)) if", "globals(), locals() dummy_func = eval(\"__numba_dummy_stencil\") sig = sig.replace(pysig=utils.pysignature(dummy_func)) 
self._targetctx.insert_func_defn([(self._lower_me, self,", "IR. # 5) Compile the combined stencil function IR +", "\" + option) wrapper = _stencil(mode, options) if func is", "'out' in stencil kernels.\") sentinel_name = ir_utils.get_unused_var_name(\"__sentinel__\", name_var_table) if config.DEBUG_ARRAY_OPT", "(Just (float[:], bool[:]) wouldn't fail) for arg in literal_unroll(args): if", "getitemvar, loc)) # Get the type of this particular part", "than 0 don't preclude any entry in the array. #", "can # index the array. for dim in range(ndim): tmpname", "\" + mode) def decorated(func): from numba.core import compiler kernel_ir", "config.DEBUG_ARRAY_OPT >= 1: print(\"After replace_return_with_setitem\", ret_blocks) ir_utils.dump_blocks(kernel_copy.blocks) # Start to", "block in blocks.items(): scope = block.scope loc = block.loc new_body", "ir.copy() kernel_copy.blocks = {} # For each block... for (block_label,", "function for each # dimension in the input array. Here", "0: return \"-np.inf\" else: return \"np.inf\" else: return str(cval) #", "is not None and len(self.neighborhood) != argtys[0].ndim): raise NumbaValueError(\"%d dimensional", "observed maximum index # in this dimension because negative maximums", "else: raise NumbaValueError(\"stencil kernel index is not \" \"constant, 'neighborhood'", "indexed arrays.\") for index in kernel_consts: if isinstance(index, tuple) or", "in range(len(ashape)): if ashape[i] > argshape[i]: raise ValueError(\"Secondary stencil array", "will be a negative number (potentially -0). Then, we do", "1: print(\"__call__\", array_types, args, kwargs) (real_ret, typemap, calltypes) = self.get_return_type(array_types)", "index_names[0], loc) tmpname = ir_utils.mk_unique_var(\"stencil_index\") tmpvar = ir.Var(scope, tmpname, loc)", "the calltypes copy. 
scopy = copy.deepcopy(stmt) new_block.body.append(scopy) if stmt in", "# Overall approach: # 1) Construct a string containing a", "for label, block in stencil_ir.blocks.items(): for i, inst in enumerate(block.body):", "from numba.misc.special import literal_unroll import numba import operator from numba.np", "if result array is passed (_, result, typemap, calltypes) =", "names. standard_indexed = self.options.get(\"standard_indexing\", []) if first_arg in standard_indexed: raise", "the calltypes because copy propagation applied to the copied IR", "new_label = max(kernel_copy.blocks.keys()) + 1 # Adjust ret_blocks to account", "self.stencilFunc.compile_for_argtys(sig.args, {}, sig.return_type, None) res = context.call_internal(builder, cres.fndesc, sig, args)", "neighborhood is None: need_to_calc_kernel = True else: need_to_calc_kernel = False", "hold the real index and is computed by # adding", "a string containing a function definition for the stencil function", "if isinstance(return_type, types.npytypes.Array): raise NumbaValueError( \"Stencil kernel must return a", "for (block_label, block) in ir.blocks.items(): new_block = copy.deepcopy(ir.blocks[block_label]) new_block.body =", "= stmt.value.index else: stmt_index_var = stmt.value.index_var # allow static_getitem since", "make subsequent uses of the original IR invalid. \"\"\" copy_calltypes", "numpy as np from llvmlite import ir as lir from", "dtype=np.{})\\n\".format( out_name, shape_name, cval_as_str(cval), return_type_name) else: out_init =\"{} = np.zeros({},", "(isinstance(stmt, ir.SetItem) and stmt.target.name in kernel.arg_names)): raise ValueError(\"Assignments to arrays", "unique name of this function. stencil_func_name = \"__numba_stencil_%s_%s\" % (", "the input array. 
Here we create the name for #", "# A new block is allocated for the statements prior", "ir_utils.get_name_var_table(kernel_copy.blocks) ir_utils.apply_copy_propagate( kernel_copy.blocks, in_cps, name_var_table, typemap, copy_calltypes) if \"out\" in", "so they are guaranteed unique # and don't conflict with", "present, if cval is set then use it if \"cval\"", "not allowed.\") if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op", "ir_utils, registry from numba.core.typing.templates import (CallableTemplate, signature, infer_global, AbstractTemplate) from", "indexed with a slice then we # have to add", "slice_addition. if isinstance(stmt_index_var_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func", "if config.DEBUG_ARRAY_OPT >= 1: print(\"get_return_type\", argtys) ir_utils.dump_blocks(self.kernel_ir.blocks) if not isinstance(argtys[0],", "ir.Expr.getitem(stmt.value.value, tmpvar, loc), stmt.target, loc)) else: index_vars = [] sum_results", "+= [tmpvar] getitemname = ir_utils.mk_unique_var(\"getitem\") getitemvar = ir.Var(scope, getitemname, loc)", "if isinstance(stmt, ir.Return): ret_blocks.append(label) # If 1D array then avoid", "for # the index variable for each dimension. index0, index1,", "args, kwargs) (real_ret, typemap, calltypes) = self.get_return_type(array_types) new_func = self._stencil_wrapper(result,", "\" \"use relative indexing, not standard indexing.\") if len(set(standard_indexed) -", "# Write the return statements original value into # the", "by StencilFunc._install_type(). Return the call-site signature. 
\"\"\" if (self.neighborhood is", "kernel # and if the original statement is in the", "not isinstance(func_or_mode, str): mode = 'constant' # default style func", "= tuple([typing.typeof.typeof(x) for x in args] + [result_type]) else: result", "and stmt.value.value.name not in standard_indexed): # We found a getitem", "\" # Put a sentinel in the code so we", "need_to_calc_kernel: # Find the size of the kernel by finding", "i in range(the_array.ndim): if isinstance(kernel_size[i][0], int): lo = kernel_size[i][0] hi", "the kernel copy so they are guaranteed unique # and", "raise NumbaValueError(\"Cannot use the reserved word 'out' in stencil kernels.\")", "this particular part of the index tuple. if isinstance(stmt_index_var_typ, types.ConstSized):", "(isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Const)): if config.DEBUG_ARRAY_OPT >= 1: print(\"remembering", "for the sentinel. for label, block in stencil_ir.blocks.items(): for i,", "ndim)) tuple_table = ir_utils.get_tuple_table(kernel.blocks) relatively_indexed = set() for block in", "in args]) array_types_full = tuple([typing.typeof.typeof(x) for x in args] +", "ind_stencils += [tmpvar] getitemname = ir_utils.mk_unique_var(\"getitem\") getitemvar = ir.Var(scope, getitemname,", "loop, for which args aren't needed. ir_utils.remove_args(kernel_copy.blocks) first_arg = kernel_copy.arg_names[0]", "gets a new label. body_first_label = min(kernel_copy.blocks.keys()) # The previous", "IR for the # stencil kernel body. func_text += \"{}", "const_dict: kernel_consts += [const_dict[stmt_index_var.name]] else: raise NumbaValueError(\"stencil kernel index is", "becomes the body of a loop, for which args aren't", "the original # calltypes then add the type associated with", "name_var_table) sig_extra = \"\" if result is not None: sig_extra", "= prev_block # Add a jump from all the blocks", "across __sentinel__ # A new block is allocated for the", "input array. 
if len(relatively_indexed) > 1: func_text += \" raise_if_incompatible_array_sizes(\"", "index # in this dimension because negative maximums would not", "block.body[i + 1:] # But the current block gets a", "return_type, calltypes, _ = typed_passes.type_inference_stage( self._typingctx, self._targetctx, self.kernel_ir, argtys, None,", "of the first input array. shape_name = ir_utils.get_unused_var_name(\"full_shape\", name_var_table) func_text", "in the code so we can locate it in the", "arrays with different types that are not compatible # (e.g.", "= kwtys['out'] if 'neighborhood' in kwtys: argtys_extra += (kwtys['neighborhood'],) sig_extra", "without literal_unroll might be # (float[:], float[:], bool[:]) (Just (float[:],", "raise ValueError(\"Secondary stencil array has some dimension \" \"smaller the", "self.add_indices_to_kernel( kernel_copy, index_vars, the_array.ndim, self.neighborhood, standard_indexed, typemap, copy_calltypes) if self.neighborhood", "b stencil_ir.blocks[new_label] = block stencil_ir.blocks[label] = prev_block # Add a", "definition includes a # unique stencil function name, the parameters", "statement in each block... for stmt in ir.blocks[block_label].body: # Copy", "try to compute elements where # elements outside the bounds", "the shape of the first input array. shape_name = ir_utils.get_unused_var_name(\"full_shape\",", "standard_indexed, typemap, calltypes): \"\"\" Transforms the stencil kernel as specified", "existence. # Copy the kernel so that our changes for", "used. func_text += (\"for {} in range(-min(0,{}),\" \"{}[{}]-max(0,{})):\\n\").format( index_vars[i], ranges[i][0],", "remove this sentinel assignment and replace it with the IR", "all variables in stencil_ir afresh var_table = ir_utils.get_name_var_table(stencil_ir.blocks) new_var_dict =", "new_var_dict) stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1 # Shift labels in", "array name \" \"not present in the stencil kernel definition.\")", "parfor body. 
prev_block.append(ir.Jump(body_first_label, loc)) # Add all the parfor loop", "copy_calltypes) def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args): #", "option required\") if ndim == 1: # Single dimension always", "with the stencil function IR. # 5) Compile the combined", "that our changes for this callsite # won't effect other", "print(func_text) # Force the new stencil function into existence. exec(func_text)", "\"standard_indexing\", \"neighborhood\"]: raise ValueError(\"Unknown stencil option \" + option) wrapper", "raise NumbaValueError( \"Stencil index does not match array dimensionality.\") return", "use later. const_dict[stmt.target.name] = stmt.value.value if ((isinstance(stmt, ir.Assign) and isinstance(stmt.value,", "Construct a string containing a function definition for the stencil", "return_type.dtype).type.__name__ if \"cval\" in self.options: cval = self.options[\"cval\"] if return_type.dtype", "kernel and a list of the relatively indexed # arrays.", "the minimum index found in the kernel # and this", "= self.copy_ir_with_calltypes( self.kernel_ir, calltypes) # The stencil kernel body becomes", "ir.Var's. var_index_vars = [] for one_var in index_vars: index_var =", "= self._stencil_wrapper(result, sigret, return_type, typemap, calltypes, *argtys) return new_func def", "the current block # label. prev_block = ir.Block(scope, loc) prev_block.body", "ret_blocks) ir_utils.dump_blocks(kernel_copy.blocks) # Start to form the new function to", "for which args aren't needed. 
ir_utils.remove_args(kernel_copy.blocks) first_arg = kernel_copy.arg_names[0] in_cps,", "typingctx.insert_user_function(self, _ty_cls) def compile_for_argtys(self, argtys, kwtys, return_type, sigret): # look", "ir, config, ir_utils, registry from numba.core.typing.templates import (CallableTemplate, signature, infer_global,", "stencil might take # multiple input arrays with different types", "(isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op in ['getitem', 'static_getitem']", "tuple that can # index the array. for dim in", "the block in the stencil outline for the sentinel. for", "a user-specified slice. \"\"\" return slice(the_slice.start + addend, the_slice.stop +", "index and is computed by # adding the relative offset", "sa_var, loc)) slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc) calltypes[slice_addition_call]", "calltypes copy. scopy = copy.deepcopy(stmt) new_block.body.append(scopy) if stmt in calltypes:", "func_text += \" \" + out_init offset = 1 #", "= ir_utils.mk_unique_var(\"ind_stencil_index\") tmpvar = ir.Var(scope, tmpname, loc) ind_stencils += [tmpvar]", "ir_utils.dump_blocks(stencil_ir.blocks) print(\"before replace sentinel kernel_copy\") ir_utils.dump_blocks(kernel_copy.blocks) # Search all the", "loc)) else: acc_call = ir.Expr.binop(operator.add, stmt_index_var, index_var, loc) new_body.append(ir.Assign(acc_call, tmpvar,", "reserved_names = ([sentinel_name, out_name, neighborhood_name, shape_name] + kernel_copy.arg_names + index_vars)", "applied #raise ValueError(\"Unexpected static_getitem in add_indices_to_kernel.\") relatively_indexed.add(stmt.value.value.name) # Store the", "__sentinel__ # A new block is allocated for the statements", "copy import numpy as np from llvmlite import ir as", "loc), tmpvar, loc)) const_index_vars += [tmpvar] index_var = ir.Var(scope, index_names[dim],", "self.kernel_ir = kernel_ir self.mode = mode self.options = options self.kws", "# sentinel but 
the new block maintains the current block", "index. si = ir.SetItem(rvar, s_index_var, stmt.value, loc) new_body.append(si) else: new_body.append(stmt)", "individual elements out of the tuple indexing # expression and", "index_vars[i], ranges[i][0], shape_name, i, ranges[i][1]) offset += 1 for j", "as bool[:]) # When more than three total arrays are", "loc) index_vars += [index_var] tmpname = ir_utils.mk_unique_var(\"ind_stencil_index\") tmpvar = ir.Var(scope,", "\" of dimensions as the first stencil input.\") argshape =", "loc) const_index_vars = [] ind_stencils = [] stmt_index_var_typ = typemap[stmt_index_var.name]", "if not isinstance(argtys[0], types.npytypes.Array): raise NumbaValueError(\"The first argument to a", "minimum's greater than 0 don't preclude any entry in the", "calltypes, *argtys) return new_func def _type_me(self, argtys, kwtys): \"\"\" Implement", "if isinstance(kernel_size[i][0], int): lo = kernel_size[i][0] hi = kernel_size[i][1] else:", "for i in range(the_array.ndim): if isinstance(kernel_size[i][0], int): lo = kernel_size[i][0]", "\" \\ \"kernels is not allowed.\") if (isinstance(stmt, ir.Assign) and", "+ addend) class StencilFunc(object): \"\"\" A special type to hold", "\" \" + out_init offset = 1 # Add the", "[stmt_index_var, index_var], (), loc) calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {})", "execute the stencil kernel. This function definition includes a #", "_, _, _) = self._type_cache[argtys_extra] return _sig (real_ret, typemap, calltypes)", "in dict(self.kws): sig_extra += \", {}=None\".format(neighborhood_name) # Get a list", "to # slice_addition. 
if isinstance(one_index_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"),", "same dimension in the first \" \"stencil input.\") def slice_addition(the_slice,", "ranges for each dimension, which could be either int #", "arg.shape for i in range(len(ashape)): if ashape[i] > argshape[i]: raise", "finding the maximum absolute value # index used in the", "tmpname = ir_utils.mk_unique_var(\"stencil_index\") tmpvar = ir.Var(scope, tmpname, loc) stmt_index_var_typ =", "+= [const_dict[stmt_index_var.name]] else: raise NumbaValueError(\"stencil kernel index is not \"", "self._type_cache[argtys_extra] return _sig (real_ret, typemap, calltypes) = self.get_return_type(argtys) sig =", "callsite # won't effect other callsites. (kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(", "range(the_array.ndim): index_var_name = ir_utils.get_unused_var_name(\"index\" + str(i), name_var_table) index_vars += [index_var_name]", "Convert the string names of the index variables into #", "in range(len(index)): te = index[i] if isinstance(te, ir.Var) and te.name", "assignment. 
loc = inst.loc scope = block.scope # split block", "option \" + option) wrapper = _stencil(mode, options) if func", "else: acc_call = ir.Expr.binop(operator.add, stmt_index_var, index_var, loc) new_body.append(ir.Assign(acc_call, tmpvar, loc))", "0 def __init__(self, kernel_ir, mode, options): self.id = type(self).id_counter type(self).id_counter", "An example failing signature without literal_unroll might be # (float[:],", "size of the kernel by finding the maximum absolute value", "argtys) ir_utils.dump_blocks(self.kernel_ir.blocks) if not isinstance(argtys[0], types.npytypes.Array): raise NumbaValueError(\"The first argument", "in ['getitem', 'static_getitem'] and stmt.value.value.name in kernel.arg_names and stmt.value.value.name not", "ir_utils.get_tuple_table(kernel.blocks) relatively_indexed = set() for block in kernel.blocks.values(): scope =", "isinstance(one_index_typ, types.misc.SliceType): sa_var = ir.Var(scope, ir_utils.mk_unique_var(\"slice_addition\"), loc) sa_func = numba.njit(slice_addition)", "\"Stencil index does not match array dimensionality.\") return (neighborhood, relatively_indexed)", "array[index0-1]. \"\"\" const_dict = {} kernel_consts = [] if config.DEBUG_ARRAY_OPT", "= typemap[stmt_index_var.name] # If the array is indexed with a", "for index in kernel_consts: if isinstance(index, tuple) or isinstance(index, list):", "stencil kernel. func_text = \"def {}({}{}):\\n\".format(stencil_func_name, \",\".join(kernel_copy.arg_names), sig_extra) # Get", "of the input array would be needed. # 2) The", "array[-1] becomes array[index0-1]. 
\"\"\" const_dict = {} kernel_consts = []", "index_vars += [index_var] tmpname = ir_utils.mk_unique_var(\"ind_stencil_index\") tmpvar = ir.Var(scope, tmpname,", "not match stencil return type.\" raise NumbaValueError(msg) out_init = \"{}[:]", "# Add a jump from all the blocks that previously", "numba import operator from numba.np import numpy_support class StencilFuncLowerer(object): '''Callable", "result = None array_types = tuple([typing.typeof.typeof(x) for x in args])", "else: return \"np.inf\" else: return str(cval) # If we have", "ir_utils.mk_unique_var(\"stencil_index\") s_index_var = ir.Var(scope, s_index_name, loc) # Build a tuple", "# index the array. for dim in range(ndim): tmpname =", "# the parfor body. prev_block.append(ir.Jump(body_first_label, loc)) # Add all the", "or non-integer used as stencil index.\") if index_len != ndim:", "is None: need_to_calc_kernel = True else: need_to_calc_kernel = False if", "So, take the minimum of 0 and the minimum index", "max(neighborhood[i][1], te) else: raise NumbaValueError( \"stencil kernel index is not", "= [[0,0] for _ in range(ndim)] if len(kernel_consts) == 0:", "not None and len(self.neighborhood) != argtys[0].ndim): raise NumbaValueError(\"%d dimensional neighborhood", "minimum labelled block of # the parfor body. prev_block.append(ir.Jump(body_first_label, loc))", "always has index variable 'index0'. 
# tmpvar will hold the", "# If the array is indexed with a slice then", "= ir.Var(scope, s_index_name, loc) const_index_vars = [] ind_stencils = []", "used) # then us numpy.full if the user specified a", "kernel_consts += [const_dict[stmt_index_var.name]] else: raise NumbaValueError(\"stencil kernel index is not", "the type associated with this # statement to the calltypes", "option not in [\"cval\", \"standard_indexing\", \"neighborhood\"]: raise ValueError(\"Unknown stencil option", "\"np.inf\" else: return str(cval) # If we have to allocate", "None: pysig = utils.pysignature(stencil_func) sigret.pysig = pysig # Get the", "stencil decorator option # or np.zeros if they didn't to", "typemap, calltypes) = self.get_return_type(argtys) sig = signature(real_ret, *argtys_extra) dummy_text =", "reserved word 'out' in stencil kernels.\") sentinel_name = ir_utils.get_unused_var_name(\"__sentinel__\", name_var_table)", "kernel_ir self.mode = mode self.options = options self.kws = []", "= None if 'out' in kwtys: argtys_extra += (kwtys['out'],) sig_extra", "if not np.isfinite(cval): # See if this is a string-repr", "isinstance(te, int): neighborhood[i][0] = min(neighborhood[i][0], te) neighborhood[i][1] = max(neighborhood[i][1], te)", "stencil \" \\ \"kernels is not allowed.\") if (isinstance(stmt, ir.Assign)", "# For each statement in each block... for stmt in", "\"dimensional input array\" % (len(neighborhood), ndim)) tuple_table = ir_utils.get_tuple_table(kernel.blocks) relatively_indexed", "numpy.full if the user specified a cval stencil decorator option", "the stencil outline for the sentinel. for label, block in", "sentinel_name): # We found the sentinel assignment. loc = inst.loc", "of # the parfor body. prev_block.append(ir.Jump(body_first_label, loc)) # Add all", "dimension's index variable as part of the getitem calls. So,", "any entry in the array. 
# So, take the minimum", "= [] for label, block in blocks.items(): scope = block.scope", "a function that will raise an error if any of", "raise NumbaValueError(\"Stencil kernel with no accesses to \" \"relatively indexed", "If there are more than one relatively indexed arrays, add", "an error if any of the relatively indexed # arrays", "stencil_ir.blocks[new_label] = block stencil_ir.blocks[label] = prev_block # Add a jump", "copy of a given IR along with its calltype information.", "raise NumbaValueError( \"stencil kernel index is not constant,\" \"'neighborhood' option", "rttype = numpy_support.from_dtype(rdtype) result_type = types.npytypes.Array(rttype, result.ndim, numpy_support.map_layout(result)) array_types =", "Store the index used after looking up the variable in", "the kernel specification. neighborhood = [[0,0] for _ in range(ndim)]", "IR will change the calltypes and make subsequent uses of", "lo = kernel_size[i][0] hi = kernel_size[i][1] else: lo = \"{}[{}][0]\".format(neighborhood_name,", "they didn't to allocate the array. if result is None:", "{} = {}.shape\\n\".format(shape_name, first_arg) # Converts cval to a string", "= False if len(neighborhood) != ndim: raise ValueError(\"%d dimensional neighborhood", "def cval_as_str(cval): if not np.isfinite(cval): # See if this is" ]
[ "\"\"\" Recoever linearized dynamics dfdx as a function of x,", "def dynamics_batch(self, x, u): \"\"\" Batch dynamics. Uses pytorch for", "x.shape[1] + u.shape[1])) for i in range(x.shape[0]): dxdu_batch[i] = self.jacobian_xu(x[i],", "env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)}) f_x = ps.Evaluate(self.jacobian_xu_sym, env)", "= {self.x_sym[i]: x[i] for i in range(self.dim_x)} env.update({self.u_sym[i]: u[i] for", "env = {self.x_sym[i]: x[i] for i in range(self.dim_x)} env.update({self.u_sym[i]: u[i]", "torch.vstack(( v * torch.cos(heading), v * torch.sin(heading), v * torch.tan(steer),", "* ps.cos(heading), v * ps.sin(heading), v * ps.tan(steer), u[0], u[1]", "5 self.dim_u = 2 \"\"\"Jacobian computations\"\"\" self.x_sym = np.array([ps.Variable(\"x_{}\".format(i)) for", "f_x = ps.Evaluate(self.jacobian_xu_sym, env) return f_x def jacobian_xu_batch(self, x, u):", "[acceleration, steering_velocity] \"\"\" self.h = h self.dim_x = 5 self.dim_u", "Batch dynamics. Uses pytorch for -args: x (np.array, dim: B", "* dxdt return x_new def dynamics(self, x, u): \"\"\" Numeric", "x[:,4] dxdt = np.vstack(( v * np.cos(heading), v * np.sin(heading),", "next state \"\"\" x = torch.Tensor(x).cuda() u = torch.Tensor(u).cuda() heading", "np import pydrake.symbolic as ps import torch import time from", "torch.cos(heading), v * torch.sin(heading), v * torch.tan(steer), u[:,0], u[:,1] )).T", "= x[4] dxdt = np.array([ v * ps.cos(heading), v *", "dxdt = np.vstack(( v * np.cos(heading), v * np.sin(heading), v", "u \"\"\" dxdu_batch = np.zeros(( x.shape[0], x.shape[1], x.shape[1] + u.shape[1]))", "as a function of x, u \"\"\" env = {self.x_sym[i]:", "(np.array, dim: B x n): batched state u (np.array, dim:", "u): \"\"\" Recoever linearized dynamics dfd(xu) as a function of", "self.u_sym = np.array([ps.Variable(\"u_{}\".format(i)) for i in range(self.dim_u)]) self.f_sym = self.dynamics_sym(self.x_sym,", "u[i] for i in range(self.dim_u)}) f_x = 
ps.Evaluate(self.jacobian_xu_sym, env) return", "= ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym))) def dynamics_sym(self, x, u): \"\"\" Symbolic", "i in range(self.dim_u)}) f_x = ps.Evaluate(self.jacobian_xu_sym, env) return f_x def", "* np.tan(steer), u[:,0], u[:,1] )).transpose() x_new = x + self.h", "x[:,4] dxdt = torch.vstack(( v * torch.cos(heading), v * torch.sin(heading),", "steering_velocity] \"\"\" self.h = h self.dim_x = 5 self.dim_u =", "v * np.tan(steer), u[0], u[1] ]) x_new = x +", "import DynamicalSystem class BicycleDynamics(DynamicalSystem): def __init__(self, h): super().__init__() \"\"\" x", "x[4] dxdt = np.array([ v * np.cos(heading), v * np.sin(heading),", "a function of x, u \"\"\" env = {self.x_sym[i]: x[i]", "from irs_lqr.dynamical_system import DynamicalSystem class BicycleDynamics(DynamicalSystem): def __init__(self, h): super().__init__()", "-args: x (np.array, dim: B x n): batched state u", "= torch.vstack(( v * torch.cos(heading), v * torch.sin(heading), v *", "range(self.dim_x)} env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)}) f_x = ps.Evaluate(self.jacobian_xu_sym,", "= [acceleration, steering_velocity] \"\"\" self.h = h self.dim_x = 5", "state u (np.array, dim: B x m): batched input -returns:", "v = x[3] steer = x[4] dxdt = np.array([ v", "v * np.sin(heading), v * np.tan(steer), u[:,0], u[:,1] )).transpose() x_new", "u): \"\"\" Numeric expression for dynamics. x (np.array, dim: n):", "for i in range(self.dim_u)]) self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym) self.jacobian_xu_sym =", "= [x pos, y pos, heading, speed, steering_angle] u =", "* dxdt return x_new def dynamics_batch(self, x, u): \"\"\" Batch", "dynamics_batch(self, x, u): \"\"\" Batch dynamics. 
Uses pytorch for -args:", "u[:,0], u[:,1] )).transpose() x_new = x + self.h * dxdt", "dxdt = torch.vstack(( v * torch.cos(heading), v * torch.sin(heading), v", "range(self.dim_u)}) f_x = ps.Evaluate(self.jacobian_xu_sym, env) return f_x def jacobian_xu_batch(self, x,", "(np.array, dim: B x n): batched next state \"\"\" x", "heading = x[2] v = x[3] steer = x[4] dxdt", "* torch.cos(heading), v * torch.sin(heading), v * torch.tan(steer), u[:,0], u[:,1]", "np.zeros(( x.shape[0], x.shape[1], x.shape[1] + u.shape[1])) for i in range(x.shape[0]):", "v * ps.sin(heading), v * ps.tan(steer), u[0], u[1] ]) x_new", "m): action \"\"\" heading = x[2] v = x[3] steer", "x + self.h * dxdt return x_new def dynamics_batch(self, x,", "(np.array, dim: n): state u (np.array, dim: m): action \"\"\"", "x[:,3] steer = x[:,4] dxdt = np.vstack(( v * np.cos(heading),", "\"\"\" dxdu_batch = np.zeros(( x.shape[0], x.shape[1], x.shape[1] + u.shape[1])) for", "dynamics(self, x, u): \"\"\" Numeric expression for dynamics. x (np.array,", "i in range(self.dim_x)]) self.u_sym = np.array([ps.Variable(\"u_{}\".format(i)) for i in range(self.dim_u)])", "* np.sin(heading), v * np.tan(steer), u[0], u[1] ]) x_new =", "np.array([ v * np.cos(heading), v * np.sin(heading), v * np.tan(steer),", "= np.array([ps.Variable(\"x_{}\".format(i)) for i in range(self.dim_x)]) self.u_sym = np.array([ps.Variable(\"u_{}\".format(i)) for", "x[4] dxdt = np.array([ v * ps.cos(heading), v * ps.sin(heading),", "dfdx as a function of x, u \"\"\" env =", "self.u_sym))) def dynamics_sym(self, x, u): \"\"\" Symbolic expression for dynamics.", "dxdt = np.array([ v * ps.cos(heading), v * ps.sin(heading), v", "= np.array([ v * np.cos(heading), v * np.sin(heading), v *", "dim: m): action \"\"\" heading = x[2] v = x[3]", "u): \"\"\" Symbolic expression for dynamics. 
Used to compute linearizations", "x[i] for i in range(self.dim_x)} env.update({self.u_sym[i]: u[i] for i in", "jacobian_xu(self, x, u): \"\"\" Recoever linearized dynamics dfdx as a", "as a function of x, u \"\"\" dxdu_batch = np.zeros((", "np.cos(heading), v * np.sin(heading), v * np.tan(steer), u[0], u[1] ])", "import torch import time from irs_lqr.dynamical_system import DynamicalSystem class BicycleDynamics(DynamicalSystem):", "-returns: xnext (np.array, dim: B x n): batched next state", "xnext (np.array, dim: B x n): batched next state \"\"\"", "f_x def jacobian_xu_batch(self, x, u): \"\"\" Recoever linearized dynamics dfd(xu)", "np.array([ps.Variable(\"u_{}\".format(i)) for i in range(self.dim_u)]) self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym) self.jacobian_xu_sym", "in range(self.dim_x)]) self.u_sym = np.array([ps.Variable(\"u_{}\".format(i)) for i in range(self.dim_u)]) self.f_sym", "action \"\"\" heading = x[2] v = x[3] steer =", "v = x[:,3] steer = x[:,4] dxdt = torch.vstack(( v", "* dxdt return x_new def jacobian_xu(self, x, u): \"\"\" Recoever", "h): super().__init__() \"\"\" x = [x pos, y pos, heading,", "next state \"\"\" heading = x[:,2] v = x[:,3] steer", "u[1] ]) x_new = x + self.h * dxdt return", "= x[4] dxdt = np.array([ v * np.cos(heading), v *", "to compute linearizations of the system. x (np.array, dim: n):", "\"\"\" x = torch.Tensor(x).cuda() u = torch.Tensor(u).cuda() heading = x[:,2]", "y pos, heading, speed, steering_angle] u = [acceleration, steering_velocity] \"\"\"", "heading = x[:,2] v = x[:,3] steer = x[:,4] dxdt", "dim: B x n): batched next state \"\"\" x =", "for dynamics. Used to compute linearizations of the system. 
x", "+ self.h * dxdt return x_new def dynamics_batch_torch(self, x, u):", "batched state u (np.array, dim: B x m): batched input", "dxdt return x_new def jacobian_xu(self, x, u): \"\"\" Recoever linearized", "= self.dynamics_sym(self.x_sym, self.u_sym) self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym))) def dynamics_sym(self,", "time from irs_lqr.dynamical_system import DynamicalSystem class BicycleDynamics(DynamicalSystem): def __init__(self, h):", "x_new def jacobian_xu(self, x, u): \"\"\" Recoever linearized dynamics dfdx", "x[:,2] v = x[:,3] steer = x[:,4] dxdt = np.vstack((", "dim: B x n): batched next state \"\"\" heading =", "x = torch.Tensor(x).cuda() u = torch.Tensor(u).cuda() heading = x[:,2] v", "ps.cos(heading), v * ps.sin(heading), v * ps.tan(steer), u[0], u[1] ])", "\"\"\" Recoever linearized dynamics dfd(xu) as a function of x,", "return x_new def dynamics_batch(self, x, u): \"\"\" Batch dynamics. Uses", "= x[:,3] steer = x[:,4] dxdt = torch.vstack(( v *", "self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym))) def dynamics_sym(self, x, u): \"\"\"", "np.sin(heading), v * np.tan(steer), u[0], u[1] ]) x_new = x", "u[:,1] )).T x_new = x + self.h * dxdt return", "i in range(self.dim_u)]) self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym) self.jacobian_xu_sym = ps.Jacobian(self.f_sym,", "B x m): batched input -returns: xnext (np.array, dim: B", "\"\"\" Batch dynamics. Uses pytorch for -args: x (np.array, dim:", "def jacobian_xu(self, x, u): \"\"\" Recoever linearized dynamics dfdx as", "ps.sin(heading), v * ps.tan(steer), u[0], u[1] ]) x_new = x", "Numeric expression for dynamics. 
x (np.array, dim: n): state u", "pos, y pos, heading, speed, steering_angle] u = [acceleration, steering_velocity]", "u[0], u[1] ]) x_new = x + self.h * dxdt", "x_new = x + self.h * dxdt return x_new def", "ps import torch import time from irs_lqr.dynamical_system import DynamicalSystem class", "np.tan(steer), u[0], u[1] ]) x_new = x + self.h *", "for dynamics. x (np.array, dim: n): state u (np.array, dim:", "heading, speed, steering_angle] u = [acceleration, steering_velocity] \"\"\" self.h =", "in range(self.dim_u)]) self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym) self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym,", "jacobian_xu_batch(self, x, u): \"\"\" Recoever linearized dynamics dfd(xu) as a", "linearized dynamics dfdx as a function of x, u \"\"\"", "v * np.cos(heading), v * np.sin(heading), v * np.tan(steer), u[0],", "import pydrake.symbolic as ps import torch import time from irs_lqr.dynamical_system", "x_new def dynamics_batch(self, x, u): \"\"\" Batch dynamics. Uses pytorch", "+ self.h * dxdt return x_new def jacobian_xu(self, x, u):", "= x[:,4] dxdt = torch.vstack(( v * torch.cos(heading), v *", "B x n): batched next state \"\"\" x = torch.Tensor(x).cuda()", "in range(self.dim_u)}) f_x = ps.Evaluate(self.jacobian_xu_sym, env) return f_x def jacobian_xu_batch(self,", "state u (np.array, dim: m): action \"\"\" heading = x[2]", "dynamics_sym(self, x, u): \"\"\" Symbolic expression for dynamics. Used to", "dim: n): state u (np.array, dim: m): action \"\"\" heading", "def dynamics_sym(self, x, u): \"\"\" Symbolic expression for dynamics. Used", "as ps import torch import time from irs_lqr.dynamical_system import DynamicalSystem", "x_new def dynamics_batch_torch(self, x, u): \"\"\" Batch dynamics. 
Uses pytorch", "* np.cos(heading), v * np.sin(heading), v * np.tan(steer), u[0], u[1]", "= x + self.h * dxdt return x_new def dynamics_batch_torch(self,", "]) x_new = x + self.h * dxdt return x_new", "x n): batched state u (np.array, dim: B x m):", "range(self.dim_x)]) self.u_sym = np.array([ps.Variable(\"u_{}\".format(i)) for i in range(self.dim_u)]) self.f_sym =", "def dynamics_batch_torch(self, x, u): \"\"\" Batch dynamics. Uses pytorch for", "x m): batched input -returns: xnext (np.array, dim: B x", "pos, heading, speed, steering_angle] u = [acceleration, steering_velocity] \"\"\" self.h", "x, u \"\"\" dxdu_batch = np.zeros(( x.shape[0], x.shape[1], x.shape[1] +", "x, u): \"\"\" Numeric expression for dynamics. x (np.array, dim:", "x, u \"\"\" env = {self.x_sym[i]: x[i] for i in", "np.sin(heading), v * np.tan(steer), u[:,0], u[:,1] )).transpose() x_new = x", "u (np.array, dim: m): action \"\"\" heading = x[2] v", "= torch.Tensor(x).cuda() u = torch.Tensor(u).cuda() heading = x[:,2] v =", "function of x, u \"\"\" dxdu_batch = np.zeros(( x.shape[0], x.shape[1],", "x[3] steer = x[4] dxdt = np.array([ v * np.cos(heading),", "torch import time from irs_lqr.dynamical_system import DynamicalSystem class BicycleDynamics(DynamicalSystem): def", "x[:,2] v = x[:,3] steer = x[:,4] dxdt = torch.vstack((", "self.dynamics_sym(self.x_sym, self.u_sym) self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym))) def dynamics_sym(self, x,", "[x pos, y pos, heading, speed, steering_angle] u = [acceleration,", "dxdt return x_new def dynamics_batch_torch(self, x, u): \"\"\" Batch dynamics.", ")).transpose() x_new = x + self.h * dxdt return x_new", "speed, steering_angle] u = [acceleration, steering_velocity] \"\"\" self.h = h", "u[:,0], u[:,1] )).T x_new = x + self.h * dxdt", "steering_angle] u = [acceleration, steering_velocity] \"\"\" self.h = h self.dim_x", "return x_new def dynamics_batch_torch(self, x, u): \"\"\" Batch dynamics. 
Uses", "x + self.h * dxdt return x_new def dynamics_batch_torch(self, x,", "+ u.shape[1])) for i in range(x.shape[0]): dxdu_batch[i] = self.jacobian_xu(x[i], u[i])", "dxdu_batch = np.zeros(( x.shape[0], x.shape[1], x.shape[1] + u.shape[1])) for i", "n): batched next state \"\"\" heading = x[:,2] v =", "x + self.h * dxdt return x_new def jacobian_xu(self, x,", "__init__(self, h): super().__init__() \"\"\" x = [x pos, y pos,", "x + self.h * dxdt return x_new def dynamics(self, x,", "= 2 \"\"\"Jacobian computations\"\"\" self.x_sym = np.array([ps.Variable(\"x_{}\".format(i)) for i in", "self.u_sym) self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym))) def dynamics_sym(self, x, u):", "steer = x[:,4] dxdt = np.vstack(( v * np.cos(heading), v", "torch.tan(steer), u[:,0], u[:,1] )).T x_new = x + self.h *", "function of x, u \"\"\" env = {self.x_sym[i]: x[i] for", "pytorch for -args: x (np.array, dim: B x n): batched", "* torch.sin(heading), v * torch.tan(steer), u[:,0], u[:,1] )).T x_new =", "(np.array, dim: m): action \"\"\" heading = x[2] v =", "u (np.array, dim: B x m): batched input -returns: xnext", "state \"\"\" x = torch.Tensor(x).cuda() u = torch.Tensor(u).cuda() heading =", "u): \"\"\" Recoever linearized dynamics dfdx as a function of", "for i in range(self.dim_u)}) f_x = ps.Evaluate(self.jacobian_xu_sym, env) return f_x", "torch.Tensor(x).cuda() u = torch.Tensor(u).cuda() heading = x[:,2] v = x[:,3]", "x[:,3] steer = x[:,4] dxdt = torch.vstack(( v * torch.cos(heading),", "return x_new def dynamics(self, x, u): \"\"\" Numeric expression for", "expression for dynamics. x (np.array, dim: n): state u (np.array,", "dynamics. Uses pytorch for -args: x (np.array, dim: B x", "x n): batched next state \"\"\" heading = x[:,2] v", "dynamics_batch_torch(self, x, u): \"\"\" Batch dynamics. Uses pytorch for -args:", "linearizations of the system. 
x (np.array, dim: n): state u", "\"\"\" self.h = h self.dim_x = 5 self.dim_u = 2", "self.h * dxdt return x_new def dynamics(self, x, u): \"\"\"", "= x[3] steer = x[4] dxdt = np.array([ v *", "v * ps.tan(steer), u[0], u[1] ]) x_new = x +", "v * np.cos(heading), v * np.sin(heading), v * np.tan(steer), u[:,0],", "expression for dynamics. Used to compute linearizations of the system.", "a function of x, u \"\"\" dxdu_batch = np.zeros(( x.shape[0],", "irs_lqr.dynamical_system import DynamicalSystem class BicycleDynamics(DynamicalSystem): def __init__(self, h): super().__init__() \"\"\"", "+ self.h * dxdt return x_new def dynamics_batch(self, x, u):", "steer = x[:,4] dxdt = torch.vstack(( v * torch.cos(heading), v", "m): batched input -returns: xnext (np.array, dim: B x n):", "Recoever linearized dynamics dfd(xu) as a function of x, u", "2 \"\"\"Jacobian computations\"\"\" self.x_sym = np.array([ps.Variable(\"x_{}\".format(i)) for i in range(self.dim_x)])", "torch.sin(heading), v * torch.tan(steer), u[:,0], u[:,1] )).T x_new = x", "x_new def dynamics(self, x, u): \"\"\" Numeric expression for dynamics.", "self.h = h self.dim_x = 5 self.dim_u = 2 \"\"\"Jacobian", "for -args: x (np.array, dim: B x n): batched state", "= np.array([ v * ps.cos(heading), v * ps.sin(heading), v *", "* np.sin(heading), v * np.tan(steer), u[:,0], u[:,1] )).transpose() x_new =", "of x, u \"\"\" env = {self.x_sym[i]: x[i] for i", "torch.Tensor(u).cuda() heading = x[:,2] v = x[:,3] steer = x[:,4]", "def dynamics(self, x, u): \"\"\" Numeric expression for dynamics. x", "\"\"\" Symbolic expression for dynamics. 
Used to compute linearizations of", "as np import pydrake.symbolic as ps import torch import time", "pydrake.symbolic as ps import torch import time from irs_lqr.dynamical_system import", "(np.array, dim: B x m): batched input -returns: xnext (np.array,", "for i in range(self.dim_x)]) self.u_sym = np.array([ps.Variable(\"u_{}\".format(i)) for i in", "\"\"\" heading = x[:,2] v = x[:,3] steer = x[:,4]", "for i in range(x.shape[0]): dxdu_batch[i] = self.jacobian_xu(x[i], u[i]) return dxdu_batch", "B x n): batched next state \"\"\" heading = x[:,2]", "np.array([ps.Variable(\"x_{}\".format(i)) for i in range(self.dim_x)]) self.u_sym = np.array([ps.Variable(\"u_{}\".format(i)) for i", "ps.tan(steer), u[0], u[1] ]) x_new = x + self.h *", "dxdt return x_new def dynamics(self, x, u): \"\"\" Numeric expression", "linearized dynamics dfd(xu) as a function of x, u \"\"\"", "steer = x[4] dxdt = np.array([ v * ps.cos(heading), v", "= np.vstack(( v * np.cos(heading), v * np.sin(heading), v *", "u[:,1] )).transpose() x_new = x + self.h * dxdt return", "= x + self.h * dxdt return x_new def jacobian_xu(self,", "x (np.array, dim: n): state u (np.array, dim: m): action", "x (np.array, dim: B x n): batched state u (np.array,", "Recoever linearized dynamics dfdx as a function of x, u", "= ps.Evaluate(self.jacobian_xu_sym, env) return f_x def jacobian_xu_batch(self, x, u): \"\"\"", "of x, u \"\"\" dxdu_batch = np.zeros(( x.shape[0], x.shape[1], x.shape[1]", "n): batched state u (np.array, dim: B x m): batched", "class BicycleDynamics(DynamicalSystem): def __init__(self, h): super().__init__() \"\"\" x = [x", "dfd(xu) as a function of x, u \"\"\" dxdu_batch =", "= h self.dim_x = 5 self.dim_u = 2 \"\"\"Jacobian computations\"\"\"", "dynamics. 
x (np.array, dim: n): state u (np.array, dim: m):", "ps.Evaluate(self.jacobian_xu_sym, env) return f_x def jacobian_xu_batch(self, x, u): \"\"\" Recoever", "* dxdt return x_new def dynamics_batch_torch(self, x, u): \"\"\" Batch", "return f_x def jacobian_xu_batch(self, x, u): \"\"\" Recoever linearized dynamics", "= x + self.h * dxdt return x_new def dynamics(self,", "dim: B x n): batched state u (np.array, dim: B", "= np.array([ps.Variable(\"u_{}\".format(i)) for i in range(self.dim_u)]) self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym)", "= x[:,4] dxdt = np.vstack(( v * np.cos(heading), v *", "self.h * dxdt return x_new def dynamics_batch_torch(self, x, u): \"\"\"", "state \"\"\" heading = x[:,2] v = x[:,3] steer =", "self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym) self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym))) def", "x.shape[0], x.shape[1], x.shape[1] + u.shape[1])) for i in range(x.shape[0]): dxdu_batch[i]", "* np.cos(heading), v * np.sin(heading), v * np.tan(steer), u[:,0], u[:,1]", "v = x[:,3] steer = x[:,4] dxdt = np.vstack(( v", "u = torch.Tensor(u).cuda() heading = x[:,2] v = x[:,3] steer", "numpy as np import pydrake.symbolic as ps import torch import", "B x n): batched state u (np.array, dim: B x", "self.x_sym = np.array([ps.Variable(\"x_{}\".format(i)) for i in range(self.dim_x)]) self.u_sym = np.array([ps.Variable(\"u_{}\".format(i))", "Used to compute linearizations of the system. x (np.array, dim:", "\"\"\" x = [x pos, y pos, heading, speed, steering_angle]", "= x[2] v = x[3] steer = x[4] dxdt =", "DynamicalSystem class BicycleDynamics(DynamicalSystem): def __init__(self, h): super().__init__() \"\"\" x =", "the system. 
x (np.array, dim: n): state u (np.array, dim:", "np.cos(heading), v * np.sin(heading), v * np.tan(steer), u[:,0], u[:,1] )).transpose()", "import numpy as np import pydrake.symbolic as ps import torch", "x = [x pos, y pos, heading, speed, steering_angle] u", "x[3] steer = x[4] dxdt = np.array([ v * ps.cos(heading),", "u = [acceleration, steering_velocity] \"\"\" self.h = h self.dim_x =", "env) return f_x def jacobian_xu_batch(self, x, u): \"\"\" Recoever linearized", "system. x (np.array, dim: n): state u (np.array, dim: m):", "import time from irs_lqr.dynamical_system import DynamicalSystem class BicycleDynamics(DynamicalSystem): def __init__(self,", "v * np.tan(steer), u[:,0], u[:,1] )).transpose() x_new = x +", "x, u): \"\"\" Recoever linearized dynamics dfdx as a function", "\"\"\" Numeric expression for dynamics. x (np.array, dim: n): state", "np.array([ v * ps.cos(heading), v * ps.sin(heading), v * ps.tan(steer),", "dynamics. Used to compute linearizations of the system. x (np.array,", "= x[:,3] steer = x[:,4] dxdt = np.vstack(( v *", "range(self.dim_u)]) self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym) self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym)))", "batched next state \"\"\" heading = x[:,2] v = x[:,3]", "return x_new def jacobian_xu(self, x, u): \"\"\" Recoever linearized dynamics", "dynamics dfd(xu) as a function of x, u \"\"\" dxdu_batch", "self.dim_x = 5 self.dim_u = 2 \"\"\"Jacobian computations\"\"\" self.x_sym =", "x[2] v = x[3] steer = x[4] dxdt = np.array([", "computations\"\"\" self.x_sym = np.array([ps.Variable(\"x_{}\".format(i)) for i in range(self.dim_x)]) self.u_sym =", "u): \"\"\" Batch dynamics. 
Uses pytorch for -args: x (np.array,", "Uses pytorch for -args: x (np.array, dim: B x n):", "BicycleDynamics(DynamicalSystem): def __init__(self, h): super().__init__() \"\"\" x = [x pos,", "input -returns: xnext (np.array, dim: B x n): batched next", "+ self.h * dxdt return x_new def dynamics(self, x, u):", "= x + self.h * dxdt return x_new def dynamics_batch(self,", "dim: B x m): batched input -returns: xnext (np.array, dim:", "super().__init__() \"\"\" x = [x pos, y pos, heading, speed,", "x, u): \"\"\" Recoever linearized dynamics dfd(xu) as a function", ")).T x_new = x + self.h * dxdt return x_new", "u.shape[1])) for i in range(x.shape[0]): dxdu_batch[i] = self.jacobian_xu(x[i], u[i]) return", "{self.x_sym[i]: x[i] for i in range(self.dim_x)} env.update({self.u_sym[i]: u[i] for i", "self.h * dxdt return x_new def jacobian_xu(self, x, u): \"\"\"", "* ps.tan(steer), u[0], u[1] ]) x_new = x + self.h", "steer = x[4] dxdt = np.array([ v * np.cos(heading), v", "batched next state \"\"\" x = torch.Tensor(x).cuda() u = torch.Tensor(u).cuda()", "compute linearizations of the system. x (np.array, dim: n): state", "h self.dim_x = 5 self.dim_u = 2 \"\"\"Jacobian computations\"\"\" self.x_sym", "x, u): \"\"\" Symbolic expression for dynamics. Used to compute", "np.vstack(( v * np.cos(heading), v * np.sin(heading), v * np.tan(steer),", "dxdt return x_new def dynamics_batch(self, x, u): \"\"\" Batch dynamics.", "* np.tan(steer), u[0], u[1] ]) x_new = x + self.h", "u \"\"\" env = {self.x_sym[i]: x[i] for i in range(self.dim_x)}", "(np.array, dim: B x n): batched next state \"\"\" heading", "x, u): \"\"\" Batch dynamics. 
Uses pytorch for -args: x", "\"\"\"Jacobian computations\"\"\" self.x_sym = np.array([ps.Variable(\"x_{}\".format(i)) for i in range(self.dim_x)]) self.u_sym", "dynamics dfdx as a function of x, u \"\"\" env", "np.tan(steer), u[:,0], u[:,1] )).transpose() x_new = x + self.h *", "v * torch.sin(heading), v * torch.tan(steer), u[:,0], u[:,1] )).T x_new", "\"\"\" heading = x[2] v = x[3] steer = x[4]", "self.dim_u = 2 \"\"\"Jacobian computations\"\"\" self.x_sym = np.array([ps.Variable(\"x_{}\".format(i)) for i", "ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym))) def dynamics_sym(self, x, u): \"\"\" Symbolic expression", "x.shape[1], x.shape[1] + u.shape[1])) for i in range(x.shape[0]): dxdu_batch[i] =", "dxdt = np.array([ v * np.cos(heading), v * np.sin(heading), v", "batched input -returns: xnext (np.array, dim: B x n): batched", "n): batched next state \"\"\" x = torch.Tensor(x).cuda() u =", "* torch.tan(steer), u[:,0], u[:,1] )).T x_new = x + self.h", "v * torch.cos(heading), v * torch.sin(heading), v * torch.tan(steer), u[:,0],", "of the system. x (np.array, dim: n): state u (np.array,", "Symbolic expression for dynamics. 
Used to compute linearizations of the", "= x[:,2] v = x[:,3] steer = x[:,4] dxdt =", "\"\"\" env = {self.x_sym[i]: x[i] for i in range(self.dim_x)} env.update({self.u_sym[i]:", "self.h * dxdt return x_new def dynamics_batch(self, x, u): \"\"\"", "= torch.Tensor(u).cuda() heading = x[:,2] v = x[:,3] steer =", "v * torch.tan(steer), u[:,0], u[:,1] )).T x_new = x +", "for i in range(self.dim_x)} env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)})", "* ps.sin(heading), v * ps.tan(steer), u[0], u[1] ]) x_new =", "v * ps.cos(heading), v * ps.sin(heading), v * ps.tan(steer), u[0],", "def __init__(self, h): super().__init__() \"\"\" x = [x pos, y", "v * np.sin(heading), v * np.tan(steer), u[0], u[1] ]) x_new", "def jacobian_xu_batch(self, x, u): \"\"\" Recoever linearized dynamics dfd(xu) as", "<gh_stars>1-10 import numpy as np import pydrake.symbolic as ps import", "x n): batched next state \"\"\" x = torch.Tensor(x).cuda() u", "in range(self.dim_x)} env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)}) f_x =", "n): state u (np.array, dim: m): action \"\"\" heading =", "i in range(self.dim_x)} env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)}) f_x", "= 5 self.dim_u = 2 \"\"\"Jacobian computations\"\"\" self.x_sym = np.array([ps.Variable(\"x_{}\".format(i))", "= np.zeros(( x.shape[0], x.shape[1], x.shape[1] + u.shape[1])) for i in", "np.hstack((self.x_sym, self.u_sym))) def dynamics_sym(self, x, u): \"\"\" Symbolic expression for" ]
[ "xlim = (-4,cz)) if tail_choice == \"Two Tails\": pv =", "pd.DataFrame({\"x\":x,\"y\":y}) normp = ggplot(ndf) + coord_fixed(ratio = 4) if tail_choice", "import math from scipy.stats import * import pandas as pd", "of the app st.subheader(\"Proportions\") st.sidebar.subheader(\"Proportion Settings\") prop_choice = st.sidebar.radio(\"\",[\"One Proportion\",\"Two", "(-4,cz)) if tail_choice == \"Two Tails\": pv = 2*(1-norm.cdf(abs(z))) cz", "cise rme = \"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI", "- nullp)/tsd x = np.arange(-4,4,.1) y = norm.pdf(x) ndf =", "\"area\",fill = \"steelblue\", xlim = (abs(z),4)) normp = normp +", "cise rme = \"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat 1\":p_hat1,\"p-Hat", "= \"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index =", "one[0]: p_hat1 = x1/n1 q_hat1 = 1 -p_hat1 p_hat2 =", "(-4,-1*abs(cz))) normp = normp + stat_function(fun = norm.pdf, geom =", "normp = normp + geom_segment(aes(x = z, y = 0,", "cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2) z = (p_hat1 - p_hat2)/tsd x =", "\"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat 1\":p_hat1,\"p-Hat 2\":p_hat2,\"Pooled p-Hat\":pp_hat,\"Diff p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test", "numpy as np from plotnine import * def app(): #", "= st.sidebar.radio(\"\",[\"One Proportion\",\"Two Proportions\"]) if prop_choice == \"One Proportion\": c1,c2,c3", "def app(): # title of the app st.subheader(\"Proportions\") st.sidebar.subheader(\"Proportion Settings\")", "1\",25)) with c2: x2 = int(st.text_input(\"Hits 2\",30)) n2 = int(st.text_input(\"Tries", "= int(st.text_input(\"Tries 2\",50)) with c3: alpha = float(st.text_input(\"Alpha\",.05)) st.markdown(\"Pick a", "geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = p_hat - abs(me) upper = p_hat", "= 1 - alpha 
normp = normp + stat_function(fun =", "= st.columns(3) with c1: x = int(st.text_input(\"Hits\",20)) n = int(st.text_input(\"Tries\",25))", "me = cz * cise rme = \"±\" + str(abs(me))", "= (z,4)) normp = normp + stat_function(fun = norm.pdf, geom", "st.sidebar.radio(\"\",[\"One Proportion\",\"Two Proportions\"]) if prop_choice == \"One Proportion\": c1,c2,c3 =", "= (abs(cz),4)) if tail_choice == \"Right Tail\": pv = 1", "float(st.text_input(\"Null:\",.7)) alpha = float(st.text_input(\"Alpha\",.05)) with c3: st.markdown(\"Pick a test:\") tail_choice", "= x1/n1 q_hat1 = 1 -p_hat1 p_hat2 = x2/n2 q_hat2", "== \"Left Tail\": pv = norm.cdf(z) cz = norm.ppf(alpha) rcz", "as st import math from scipy.stats import * import pandas", "p_hat + abs(me) st.write(str(100*cl) + \"'%' confidence interval is (\"", "with c2: x2 = int(st.text_input(\"Hits 2\",30)) n2 = int(st.text_input(\"Tries 2\",50))", "(cz,4)) me = cz * cise rme = \"±\" +", "\"Right Tail\": pv = 1 - norm.cdf(z) cz = -1", "Tails\",\"Right Tail\"]) one = st.columns(1) with one[0]: p_hat = x/n", "= \"steelblue\", xlim = (-4,-1*abs(z))) normp = normp + stat_function(fun", "interval is (\" + str(lower) +\", \"+str(upper)+\")\") if prop_choice ==", "lower = p_hat - abs(me) upper = p_hat + abs(me)", "\"steelblue\", xlim = (z,4)) normp = normp + stat_function(fun =", "with c3: st.markdown(\"Pick a test:\") tail_choice = st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right", "st.write(str(100*cl) + \"'%' confidence interval is (\" + str(lower) +\",", "= norm.pdf(x) ndf = pd.DataFrame({\"x\":x,\"y\":y}) normp = ggplot(ndf) + coord_fixed(ratio", "= norm.pdf, geom = \"area\",fill = \"orange\", xlim = (abs(cz),4))", "(abs(z),4)) normp = normp + stat_function(fun = norm.pdf, geom =", "one[0]: p_hat = x/n tsd = math.sqrt(nullp*(1-nullp)/n) cise = math.sqrt(p_hat*(1-p_hat)/n)", "if tail_choice == \"Right Tail\": pv = 1 - norm.cdf(z)", "= pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test 
SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp = normp", "p_hat2 pq_hat = 1-pp_hat tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2)) cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2)", "= math.sqrt(pp_hat*pq_hat*(1/n1+1/n2)) cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2) z = (p_hat1 - p_hat2)/tsd", "int(st.text_input(\"Hits\",20)) n = int(st.text_input(\"Tries\",25)) with c2: nullp = float(st.text_input(\"Null:\",.7)) alpha", "with one[0]: p_hat1 = x1/n1 q_hat1 = 1 -p_hat1 p_hat2", "= \"orange\", xlim = (-4,-1*abs(cz))) normp = normp + stat_function(fun", "= int(st.text_input(\"Hits\",20)) n = int(st.text_input(\"Tries\",25)) with c2: nullp = float(st.text_input(\"Null:\",.7))", "with c3: alpha = float(st.text_input(\"Alpha\",.05)) st.markdown(\"Pick a test:\") tail_choice =", "p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp = normp +", "stat_function(fun = norm.pdf, geom = \"area\",fill = \"orange\", xlim =", "+ coord_fixed(ratio = 4) if tail_choice == \"Left Tail\": pv", "st.pyplot(ggplot.draw(normp)) lower = dp_hat - abs(me) upper = dp_hat +", "n2 = int(st.text_input(\"Tries 2\",50)) with c3: alpha = float(st.text_input(\"Alpha\",.05)) st.markdown(\"Pick", "math.sqrt(p_hat*(1-p_hat)/n) z = (p_hat - nullp)/tsd x = np.arange(-4,4,.1) y", "= int(st.text_input(\"Tries 1\",25)) with c2: x2 = int(st.text_input(\"Hits 2\",30)) n2", "-1 * norm.ppf(alpha) rcz = cz cl = 1 -", "* def app(): # title of the app st.subheader(\"Proportions\") st.sidebar.subheader(\"Proportion", "pd import numpy as np from plotnine import * def", "norm.pdf, geom = \"area\",fill = \"orange\", xlim = (cz,4)) me", "- abs(me) upper = dp_hat + abs(me) st.write(str(100*cl) + \"'%'", "= float(st.text_input(\"Alpha\",.05)) st.markdown(\"Pick a test:\") tail_choice = st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right", "import * import pandas as pd import numpy as np", 
"z = (p_hat - nullp)/tsd x = np.arange(-4,4,.1) y =", "1 - p_hat2 pp_hat = (x1+x2)/(n1+n2) dp_hat = p_hat1 -", "= \"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat 1\":p_hat1,\"p-Hat 2\":p_hat2,\"Pooled p-Hat\":pp_hat,\"Diff", "1-pp_hat tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2)) cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2) z = (p_hat1", "+ abs(me) st.write(str(100*cl) + \"'%' confidence interval is (\" +", "is (\" + str(lower) +\", \"+str(upper)+\")\") if prop_choice == \"Two", "= int(st.text_input(\"Hits 1\",20)) n1 = int(st.text_input(\"Tries 1\",25)) with c2: x2", "+ str(lower) +\", \"+str(upper)+\")\") if prop_choice == \"Two Proportions\": c1,c2,c3", "= x2/n2 q_hat2 = 1 - p_hat2 pp_hat = (x1+x2)/(n1+n2)", "\"Left Tail\": pv = norm.cdf(z) cz = norm.ppf(alpha) rcz =", "= norm.cdf(z) cz = norm.ppf(alpha) rcz = cz cl =", "\"orange\", xlim = (cz,4)) me = cz * cise rme", "- 2*alpha normp = normp + stat_function(fun = norm.pdf, geom", "y = norm.pdf(x) ndf = pd.DataFrame({\"x\":x,\"y\":y}) normp = ggplot(ndf) +", "if prop_choice == \"One Proportion\": c1,c2,c3 = st.columns(3) with c1:", "= (cz,4)) me = cz * cise rme = \"±\"", "p_hat2 = x2/n2 q_hat2 = 1 - p_hat2 pp_hat =", "= normp + stat_function(fun = norm.pdf, geom = \"area\",fill =", "* norm.ppf(alpha) rcz = cz cl = 1 - 2*alpha", "int(st.text_input(\"Tries 1\",25)) with c2: x2 = int(st.text_input(\"Hits 2\",30)) n2 =", "int(st.text_input(\"Tries 2\",50)) with c3: alpha = float(st.text_input(\"Alpha\",.05)) st.markdown(\"Pick a test:\")", "= norm.pdf, geom = \"area\",fill = \"steelblue\", xlim = (-4,-1*abs(z)))", "= \"orange\", xlim = (-4,cz)) if tail_choice == \"Two Tails\":", "coord_fixed(ratio = 4) if tail_choice == \"Left Tail\": pv =", "xlim = (cz,4)) me = cz * cise rme =", "(abs(cz),4)) if tail_choice == \"Right Tail\": pv = 1 -", "cz = -1 * norm.ppf(alpha) rcz = cz cl =", "prop_choice = st.sidebar.radio(\"\",[\"One Proportion\",\"Two Proportions\"]) if prop_choice == \"One Proportion\":", "streamlit as 
st import math from scipy.stats import * import", "* cise rme = \"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat", "== \"One Proportion\": c1,c2,c3 = st.columns(3) with c1: x =", "\"steelblue\", xlim = (abs(z),4)) normp = normp + stat_function(fun =", "int(st.text_input(\"Hits 1\",20)) n1 = int(st.text_input(\"Tries 1\",25)) with c2: x2 =", "c1,c2,c3 = st.columns(3) with c1: x = int(st.text_input(\"Hits\",20)) n =", "= z, yend = norm.pdf(z)),color=\"red\") normp = normp + geom_line(aes(x=x,y=y))", "with c2: nullp = float(st.text_input(\"Null:\",.7)) alpha = float(st.text_input(\"Alpha\",.05)) with c3:", "- p_hat2)/tsd x = np.arange(-4,4,.1) y = norm.pdf(x) ndf =", "Proportions\"]) if prop_choice == \"One Proportion\": c1,c2,c3 = st.columns(3) with", "geom = \"area\",fill = \"steelblue\", xlim = (z,4)) normp =", "= float(st.text_input(\"Alpha\",.05)) with c3: st.markdown(\"Pick a test:\") tail_choice = st.radio(\"\",[\"Left", "Tail\"]) one = st.columns(1) with one[0]: p_hat = x/n tsd", "norm.pdf, geom = \"area\",fill = \"steelblue\", xlim = (abs(z),4)) normp", "Tail\": pv = 1 - norm.cdf(z) cz = -1 *", "data = pd.DataFrame({\"p-Hat 1\":p_hat1,\"p-Hat 2\":p_hat2,\"Pooled p-Hat\":pp_hat,\"Diff p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index =", "= 0, xend = z, yend = norm.pdf(z)),color=\"red\") normp =", "= norm.pdf, geom = \"area\",fill = \"orange\", xlim = (-4,cz))", "alpha normp = normp + stat_function(fun = norm.pdf, geom =", "1 - alpha normp = normp + stat_function(fun = norm.pdf,", "(-4,-1*abs(z))) normp = normp + stat_function(fun = norm.pdf, geom =", "pv = norm.cdf(z) cz = norm.ppf(alpha) rcz = cz cl", "normp = normp + stat_function(fun = norm.pdf, geom = \"area\",fill", "pd.DataFrame({\"p-Hat 1\":p_hat1,\"p-Hat 2\":p_hat2,\"Pooled p-Hat\":pp_hat,\"Diff p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) 
st.write(data)", "\"orange\", xlim = (-4,-1*abs(cz))) normp = normp + stat_function(fun =", "SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp = normp + geom_segment(aes(x =", "= \"area\",fill = \"steelblue\", xlim = (abs(z),4)) normp = normp", "tail_choice == \"Left Tail\": pv = norm.cdf(z) cz = norm.ppf(alpha)", "float(st.text_input(\"Alpha\",.05)) st.markdown(\"Pick a test:\") tail_choice = st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right Tail\"])", "n1 = int(st.text_input(\"Tries 1\",25)) with c2: x2 = int(st.text_input(\"Hits 2\",30))", "plotnine import * def app(): # title of the app", "\"Two Proportions\": c1,c2,c3 = st.columns(3) with c1: x1 = int(st.text_input(\"Hits", "= int(st.text_input(\"Tries\",25)) with c2: nullp = float(st.text_input(\"Null:\",.7)) alpha = float(st.text_input(\"Alpha\",.05))", "= -1 * norm.ppf(alpha) rcz = cz cl = 1", "- norm.cdf(z) cz = -1 * norm.ppf(alpha) rcz = cz", "= norm.pdf, geom = \"area\",fill = \"orange\", xlim = (cz,4))", "tsd = math.sqrt(nullp*(1-nullp)/n) cise = math.sqrt(p_hat*(1-p_hat)/n) z = (p_hat -", "math from scipy.stats import * import pandas as pd import", "data = pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp =", "y = 0, xend = z, yend = norm.pdf(z)),color=\"red\") normp", "np from plotnine import * def app(): # title of", "np.arange(-4,4,.1) y = norm.pdf(x) ndf = pd.DataFrame({\"x\":x,\"y\":y}) normp = ggplot(ndf)", "geom = \"area\",fill = \"steelblue\", xlim = (-4,-1*abs(z))) normp =", "= 1 - p_hat2 pp_hat = (x1+x2)/(n1+n2) dp_hat = p_hat1", "(p_hat - nullp)/tsd x = np.arange(-4,4,.1) y = norm.pdf(x) ndf", "== \"Two Tails\": pv = 2*(1-norm.cdf(abs(z))) cz = abs(norm.ppf(alpha/2)) rcz", "prop_choice == \"One Proportion\": c1,c2,c3 = st.columns(3) with c1: x", "0, xend = z, yend = norm.pdf(z)),color=\"red\") normp = normp", "yend = norm.pdf(z)),color=\"red\") normp = normp + 
geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower", "= p_hat + abs(me) st.write(str(100*cl) + \"'%' confidence interval is", "\"orange\", xlim = (-4,cz)) if tail_choice == \"Two Tails\": pv", "int(st.text_input(\"Tries\",25)) with c2: nullp = float(st.text_input(\"Null:\",.7)) alpha = float(st.text_input(\"Alpha\",.05)) with", "= dp_hat - abs(me) upper = dp_hat + abs(me) st.write(str(100*cl)", "\"steelblue\", xlim = (-4,z)) normp = normp + stat_function(fun =", "Proportion\",\"Two Proportions\"]) if prop_choice == \"One Proportion\": c1,c2,c3 = st.columns(3)", "as pd import numpy as np from plotnine import *", "geom = \"area\",fill = \"orange\", xlim = (cz,4)) me =", "str(abs(me)) data = pd.DataFrame({\"p-Hat 1\":p_hat1,\"p-Hat 2\":p_hat2,\"Pooled p-Hat\":pp_hat,\"Diff p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index", "= 1 - norm.cdf(z) cz = -1 * norm.ppf(alpha) rcz", "nullp)/tsd x = np.arange(-4,4,.1) y = norm.pdf(x) ndf = pd.DataFrame({\"x\":x,\"y\":y})", "= norm.pdf(z)),color=\"red\") normp = normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower =", "\"One Proportion\": c1,c2,c3 = st.columns(3) with c1: x = int(st.text_input(\"Hits\",20))", "p_hat2)/tsd x = np.arange(-4,4,.1) y = norm.pdf(x) ndf = pd.DataFrame({\"x\":x,\"y\":y})", "c2: nullp = float(st.text_input(\"Null:\",.7)) alpha = float(st.text_input(\"Alpha\",.05)) with c3: st.markdown(\"Pick", "= math.sqrt(p_hat*(1-p_hat)/n) z = (p_hat - nullp)/tsd x = np.arange(-4,4,.1)", "= \"area\",fill = \"steelblue\", xlim = (-4,-1*abs(z))) normp = normp", "upper = dp_hat + abs(me) st.write(str(100*cl) + \"'%' confidence interval", "2*alpha normp = normp + stat_function(fun = norm.pdf, geom =", "= ggplot(ndf) + coord_fixed(ratio = 4) if tail_choice == \"Left", "alpha = float(st.text_input(\"Alpha\",.05)) with c3: st.markdown(\"Pick a test:\") tail_choice =", "2\",30)) n2 = int(st.text_input(\"Tries 2\",50)) with c3: alpha = 
float(st.text_input(\"Alpha\",.05))", "Proportions\": c1,c2,c3 = st.columns(3) with c1: x1 = int(st.text_input(\"Hits 1\",20))", "import pandas as pd import numpy as np from plotnine", "Tails\": pv = 2*(1-norm.cdf(abs(z))) cz = abs(norm.ppf(alpha/2)) rcz = \"±\"", "norm.pdf, geom = \"area\",fill = \"orange\", xlim = (abs(cz),4)) if", "- p_hat2 pp_hat = (x1+x2)/(n1+n2) dp_hat = p_hat1 - p_hat2", "z, y = 0, xend = z, yend = norm.pdf(z)),color=\"red\")", "c1: x1 = int(st.text_input(\"Hits 1\",20)) n1 = int(st.text_input(\"Tries 1\",25)) with", "geom = \"area\",fill = \"orange\", xlim = (abs(cz),4)) if tail_choice", "abs(norm.ppf(alpha/2)) rcz = \"±\" + str(abs(norm.ppf(alpha/2))) cl = 1 -", "= \"area\",fill = \"orange\", xlim = (abs(cz),4)) if tail_choice ==", "norm.pdf, geom = \"area\",fill = \"orange\", xlim = (-4,cz)) if", "geom = \"area\",fill = \"orange\", xlim = (-4,-1*abs(cz))) normp =", "cise = math.sqrt(p_hat*(1-p_hat)/n) z = (p_hat - nullp)/tsd x =", "\"Two Tails\": pv = 2*(1-norm.cdf(abs(z))) cz = abs(norm.ppf(alpha/2)) rcz =", "= \"area\",fill = \"steelblue\", xlim = (z,4)) normp = normp", "prop_choice == \"Two Proportions\": c1,c2,c3 = st.columns(3) with c1: x1", "\"±\" + str(abs(norm.ppf(alpha/2))) cl = 1 - alpha normp =", "x2/n2 q_hat2 = 1 - p_hat2 pp_hat = (x1+x2)/(n1+n2) dp_hat", "norm.ppf(alpha) rcz = cz cl = 1 - 2*alpha normp", "rcz = cz cl = 1 - 2*alpha normp =", "st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right Tail\"]) one = st.columns(1) with one[0]: p_hat", "cl = 1 - alpha normp = normp + stat_function(fun", "pp_hat = (x1+x2)/(n1+n2) dp_hat = p_hat1 - p_hat2 pq_hat =", "x = np.arange(-4,4,.1) y = norm.pdf(x) ndf = pd.DataFrame({\"x\":x,\"y\":y}) normp", "1 -p_hat1 p_hat2 = x2/n2 q_hat2 = 1 - p_hat2", "= st.columns(3) with c1: x1 = int(st.text_input(\"Hits 1\",20)) n1 =", "c1: x = int(st.text_input(\"Hits\",20)) n = int(st.text_input(\"Tries\",25)) with c2: nullp", "= p_hat1 - p_hat2 pq_hat = 1-pp_hat tsd = 
math.sqrt(pp_hat*pq_hat*(1/n1+1/n2))", "z, yend = norm.pdf(z)),color=\"red\") normp = normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp))", "if prop_choice == \"Two Proportions\": c1,c2,c3 = st.columns(3) with c1:", "dp_hat - abs(me) upper = dp_hat + abs(me) st.write(str(100*cl) +", "Tail\"]) one = st.columns(1) with one[0]: p_hat1 = x1/n1 q_hat1", "2\",50)) with c3: alpha = float(st.text_input(\"Alpha\",.05)) st.markdown(\"Pick a test:\") tail_choice", "= norm.pdf, geom = \"area\",fill = \"steelblue\", xlim = (-4,z))", "cz = abs(norm.ppf(alpha/2)) rcz = \"±\" + str(abs(norm.ppf(alpha/2))) cl =", "xend = z, yend = norm.pdf(z)),color=\"red\") normp = normp +", "if tail_choice == \"Left Tail\": pv = norm.cdf(z) cz =", "with c1: x = int(st.text_input(\"Hits\",20)) n = int(st.text_input(\"Tries\",25)) with c2:", "= (p_hat1 - p_hat2)/tsd x = np.arange(-4,4,.1) y = norm.pdf(x)", "\"orange\", xlim = (abs(cz),4)) if tail_choice == \"Right Tail\": pv", "tail_choice == \"Two Tails\": pv = 2*(1-norm.cdf(abs(z))) cz = abs(norm.ppf(alpha/2))", "xlim = (-4,-1*abs(cz))) normp = normp + stat_function(fun = norm.pdf,", "# title of the app st.subheader(\"Proportions\") st.sidebar.subheader(\"Proportion Settings\") prop_choice =", "= 4) if tail_choice == \"Left Tail\": pv = norm.cdf(z)", "as np from plotnine import * def app(): # title", "abs(me) upper = p_hat + abs(me) st.write(str(100*cl) + \"'%' confidence", "\"area\",fill = \"orange\", xlim = (-4,cz)) if tail_choice == \"Two", "the app st.subheader(\"Proportions\") st.sidebar.subheader(\"Proportion Settings\") prop_choice = st.sidebar.radio(\"\",[\"One Proportion\",\"Two Proportions\"])", "confidence interval is (\" + str(lower) +\", \"+str(upper)+\")\") if prop_choice", "st.subheader(\"Proportions\") st.sidebar.subheader(\"Proportion Settings\") prop_choice = st.sidebar.radio(\"\",[\"One Proportion\",\"Two Proportions\"]) if prop_choice", "int(st.text_input(\"Hits 2\",30)) n2 = int(st.text_input(\"Tries 2\",50)) with 
c3: alpha =", "Tails\",\"Right Tail\"]) one = st.columns(1) with one[0]: p_hat1 = x1/n1", "= 1 -p_hat1 p_hat2 = x2/n2 q_hat2 = 1 -", "str(lower) +\", \"+str(upper)+\")\") if prop_choice == \"Two Proportions\": c1,c2,c3 =", "xlim = (-4,z)) normp = normp + stat_function(fun = norm.pdf,", "x/n tsd = math.sqrt(nullp*(1-nullp)/n) cise = math.sqrt(p_hat*(1-p_hat)/n) z = (p_hat", "[0]) st.write(data) normp = normp + geom_segment(aes(x = z, y", "1\",20)) n1 = int(st.text_input(\"Tries 1\",25)) with c2: x2 = int(st.text_input(\"Hits", "with c1: x1 = int(st.text_input(\"Hits 1\",20)) n1 = int(st.text_input(\"Tries 1\",25))", "q_hat2 = 1 - p_hat2 pp_hat = (x1+x2)/(n1+n2) dp_hat =", "(p_hat1 - p_hat2)/tsd x = np.arange(-4,4,.1) y = norm.pdf(x) ndf", "c3: st.markdown(\"Pick a test:\") tail_choice = st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right Tail\"])", "1 - 2*alpha normp = normp + stat_function(fun = norm.pdf,", "2*(1-norm.cdf(abs(z))) cz = abs(norm.ppf(alpha/2)) rcz = \"±\" + str(abs(norm.ppf(alpha/2))) cl", "p_hat1 = x1/n1 q_hat1 = 1 -p_hat1 p_hat2 = x2/n2", "+ stat_function(fun = norm.pdf, geom = \"area\",fill = \"steelblue\", xlim", "z = (p_hat1 - p_hat2)/tsd x = np.arange(-4,4,.1) y =", "* cise rme = \"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test", "= \"area\",fill = \"orange\", xlim = (-4,cz)) if tail_choice ==", "= normp + geom_segment(aes(x = z, y = 0, xend", "+ str(abs(me)) data = pd.DataFrame({\"p-Hat 1\":p_hat1,\"p-Hat 2\":p_hat2,\"Pooled p-Hat\":pp_hat,\"Diff p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI", "= cz cl = 1 - 2*alpha normp = normp", "normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = p_hat - abs(me) upper", "cl = 1 - 2*alpha normp = normp + stat_function(fun", "upper = p_hat + abs(me) st.write(str(100*cl) + \"'%' confidence interval", "= dp_hat + abs(me) st.write(str(100*cl) + \"'%' confidence interval is", "p_hat2 pp_hat = 
(x1+x2)/(n1+n2) dp_hat = p_hat1 - p_hat2 pq_hat", "import streamlit as st import math from scipy.stats import *", "normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = dp_hat - abs(me) upper", "tail_choice == \"Right Tail\": pv = 1 - norm.cdf(z) cz", "nullp = float(st.text_input(\"Null:\",.7)) alpha = float(st.text_input(\"Alpha\",.05)) with c3: st.markdown(\"Pick a", "st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right Tail\"]) one = st.columns(1) with one[0]: p_hat1", "abs(me) upper = dp_hat + abs(me) st.write(str(100*cl) + \"'%' confidence", "c2: x2 = int(st.text_input(\"Hits 2\",30)) n2 = int(st.text_input(\"Tries 2\",50)) with", "title of the app st.subheader(\"Proportions\") st.sidebar.subheader(\"Proportion Settings\") prop_choice = st.sidebar.radio(\"\",[\"One", "= int(st.text_input(\"Hits 2\",30)) n2 = int(st.text_input(\"Tries 2\",50)) with c3: alpha", "one = st.columns(1) with one[0]: p_hat1 = x1/n1 q_hat1 =", "= \"steelblue\", xlim = (-4,z)) normp = normp + stat_function(fun", "app st.subheader(\"Proportions\") st.sidebar.subheader(\"Proportion Settings\") prop_choice = st.sidebar.radio(\"\",[\"One Proportion\",\"Two Proportions\"]) if", "= (-4,-1*abs(z))) normp = normp + stat_function(fun = norm.pdf, geom", "rme = \"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index", "= 1-pp_hat tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2)) cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2) z =", "\"area\",fill = \"orange\", xlim = (-4,-1*abs(cz))) normp = normp +", "1\":p_hat1,\"p-Hat 2\":p_hat2,\"Pooled p-Hat\":pp_hat,\"Diff p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp", "stat_function(fun = norm.pdf, geom = \"area\",fill = \"steelblue\", xlim =", "= pd.DataFrame({\"p-Hat 1\":p_hat1,\"p-Hat 2\":p_hat2,\"Pooled p-Hat\":pp_hat,\"Diff 
p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0])", "n = int(st.text_input(\"Tries\",25)) with c2: nullp = float(st.text_input(\"Null:\",.7)) alpha =", "normp + stat_function(fun = norm.pdf, geom = \"area\",fill = \"steelblue\",", "rme = \"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat 1\":p_hat1,\"p-Hat 2\":p_hat2,\"Pooled", "\"+str(upper)+\")\") if prop_choice == \"Two Proportions\": c1,c2,c3 = st.columns(3) with", "normp = normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = p_hat -", "pv = 2*(1-norm.cdf(abs(z))) cz = abs(norm.ppf(alpha/2)) rcz = \"±\" +", "= st.columns(1) with one[0]: p_hat = x/n tsd = math.sqrt(nullp*(1-nullp)/n)", "= normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = dp_hat - abs(me)", "= 2*(1-norm.cdf(abs(z))) cz = abs(norm.ppf(alpha/2)) rcz = \"±\" + str(abs(norm.ppf(alpha/2)))", "\"steelblue\", xlim = (-4,-1*abs(z))) normp = normp + stat_function(fun =", "= st.columns(1) with one[0]: p_hat1 = x1/n1 q_hat1 = 1", "xlim = (-4,-1*abs(z))) normp = normp + stat_function(fun = norm.pdf,", "norm.pdf(z)),color=\"red\") normp = normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = dp_hat", "= z, y = 0, xend = z, yend =", "normp + geom_segment(aes(x = z, y = 0, xend =", "st.columns(1) with one[0]: p_hat1 = x1/n1 q_hat1 = 1 -p_hat1", "= p_hat - abs(me) upper = p_hat + abs(me) st.write(str(100*cl)", "norm.pdf(x) ndf = pd.DataFrame({\"x\":x,\"y\":y}) normp = ggplot(ndf) + coord_fixed(ratio =", "math.sqrt(pp_hat*pq_hat*(1/n1+1/n2)) cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2) z = (p_hat1 - p_hat2)/tsd x", "= \"area\",fill = \"orange\", xlim = (-4,-1*abs(cz))) normp = normp", "float(st.text_input(\"Alpha\",.05)) with c3: st.markdown(\"Pick a test:\") tail_choice = st.radio(\"\",[\"Left Tail\",\"Two", "4) if tail_choice == \"Left Tail\": pv = norm.cdf(z) cz", "1 - norm.cdf(z) cz = -1 * norm.ppf(alpha) rcz =", "Tail\",\"Two 
Tails\",\"Right Tail\"]) one = st.columns(1) with one[0]: p_hat1 =", "geom = \"area\",fill = \"steelblue\", xlim = (abs(z),4)) normp =", "+ geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = dp_hat - abs(me) upper =", "st.write(data) normp = normp + geom_segment(aes(x = z, y =", "= norm.pdf, geom = \"area\",fill = \"steelblue\", xlim = (z,4))", "= 1 - 2*alpha normp = normp + stat_function(fun =", "a test:\") tail_choice = st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right Tail\"]) one =", "= normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = p_hat - abs(me)", "alpha = float(st.text_input(\"Alpha\",.05)) st.markdown(\"Pick a test:\") tail_choice = st.radio(\"\",[\"Left Tail\",\"Two", "= \"orange\", xlim = (abs(cz),4)) if tail_choice == \"Right Tail\":", "= st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right Tail\"]) one = st.columns(1) with one[0]:", "= x/n tsd = math.sqrt(nullp*(1-nullp)/n) cise = math.sqrt(p_hat*(1-p_hat)/n) z =", "(x1+x2)/(n1+n2) dp_hat = p_hat1 - p_hat2 pq_hat = 1-pp_hat tsd", "== \"Right Tail\": pv = 1 - norm.cdf(z) cz =", "math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2) z = (p_hat1 - p_hat2)/tsd x = np.arange(-4,4,.1) y", "tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2)) cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2) z = (p_hat1 -", "+ stat_function(fun = norm.pdf, geom = \"area\",fill = \"orange\", xlim", "norm.pdf, geom = \"area\",fill = \"orange\", xlim = (-4,-1*abs(cz))) normp", "Proportion\": c1,c2,c3 = st.columns(3) with c1: x = int(st.text_input(\"Hits\",20)) n", "2\":p_hat2,\"Pooled p-Hat\":pp_hat,\"Diff p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp =", "xlim = (abs(z),4)) normp = normp + stat_function(fun = norm.pdf,", "st.sidebar.subheader(\"Proportion Settings\") prop_choice = st.sidebar.radio(\"\",[\"One Proportion\",\"Two Proportions\"]) if prop_choice ==", "p_hat1 - p_hat2 pq_hat = 1-pp_hat tsd = 
math.sqrt(pp_hat*pq_hat*(1/n1+1/n2)) cise", "st.columns(3) with c1: x = int(st.text_input(\"Hits\",20)) n = int(st.text_input(\"Tries\",25)) with", "+ \"'%' confidence interval is (\" + str(lower) +\", \"+str(upper)+\")\")", "pq_hat = 1-pp_hat tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2)) cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2) z", "xlim = (z,4)) normp = normp + stat_function(fun = norm.pdf,", "norm.pdf(z)),color=\"red\") normp = normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = p_hat", "test:\") tail_choice = st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right Tail\"]) one = st.columns(1)", "pv = 1 - norm.cdf(z) cz = -1 * norm.ppf(alpha)", "\"area\",fill = \"steelblue\", xlim = (z,4)) normp = normp +", "x2 = int(st.text_input(\"Hits 2\",30)) n2 = int(st.text_input(\"Tries 2\",50)) with c3:", "x1/n1 q_hat1 = 1 -p_hat1 p_hat2 = x2/n2 q_hat2 =", "import numpy as np from plotnine import * def app():", "xlim = (abs(cz),4)) if tail_choice == \"Right Tail\": pv =", "import * def app(): # title of the app st.subheader(\"Proportions\")", "= norm.pdf, geom = \"area\",fill = \"orange\", xlim = (-4,-1*abs(cz)))", "app(): # title of the app st.subheader(\"Proportions\") st.sidebar.subheader(\"Proportion Settings\") prop_choice", "math.sqrt(nullp*(1-nullp)/n) cise = math.sqrt(p_hat*(1-p_hat)/n) z = (p_hat - nullp)/tsd x", "st.markdown(\"Pick a test:\") tail_choice = st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right Tail\"]) one", "Tail\": pv = norm.cdf(z) cz = norm.ppf(alpha) rcz = cz", "= np.arange(-4,4,.1) y = norm.pdf(x) ndf = pd.DataFrame({\"x\":x,\"y\":y}) normp =", "with one[0]: p_hat = x/n tsd = math.sqrt(nullp*(1-nullp)/n) cise =", "normp = ggplot(ndf) + coord_fixed(ratio = 4) if tail_choice ==", "one = st.columns(1) with one[0]: p_hat = x/n tsd =", "= cz * cise rme = \"±\" + str(abs(me)) data", "= norm.ppf(alpha) rcz = cz cl = 1 - 2*alpha", "- abs(me) upper = p_hat + abs(me) st.write(str(100*cl) + \"'%'", "c1,c2,c3 = st.columns(3) with c1: 
x1 = int(st.text_input(\"Hits 1\",20)) n1", "cz = norm.ppf(alpha) rcz = cz cl = 1 -", "\"'%' confidence interval is (\" + str(lower) +\", \"+str(upper)+\")\") if", "geom = \"area\",fill = \"steelblue\", xlim = (-4,z)) normp =", "Tail\",\"Two Tails\",\"Right Tail\"]) one = st.columns(1) with one[0]: p_hat =", "= \"steelblue\", xlim = (z,4)) normp = normp + stat_function(fun", "Settings\") prop_choice = st.sidebar.radio(\"\",[\"One Proportion\",\"Two Proportions\"]) if prop_choice == \"One", "norm.pdf, geom = \"area\",fill = \"steelblue\", xlim = (z,4)) normp", "(-4,z)) normp = normp + stat_function(fun = norm.pdf, geom =", "pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp = normp +", "geom = \"area\",fill = \"orange\", xlim = (-4,cz)) if tail_choice", "abs(me) st.write(str(100*cl) + \"'%' confidence interval is (\" + str(lower)", "ggplot(ndf) + coord_fixed(ratio = 4) if tail_choice == \"Left Tail\":", "c3: alpha = float(st.text_input(\"Alpha\",.05)) st.markdown(\"Pick a test:\") tail_choice = st.radio(\"\",[\"Left", "= abs(norm.ppf(alpha/2)) rcz = \"±\" + str(abs(norm.ppf(alpha/2))) cl = 1", "= (p_hat - nullp)/tsd x = np.arange(-4,4,.1) y = norm.pdf(x)", "scipy.stats import * import pandas as pd import numpy as", "if tail_choice == \"Two Tails\": pv = 2*(1-norm.cdf(abs(z))) cz =", "lower = dp_hat - abs(me) upper = dp_hat + abs(me)", "geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = dp_hat - abs(me) upper = dp_hat", "st import math from scipy.stats import * import pandas as", "rcz = \"±\" + str(abs(norm.ppf(alpha/2))) cl = 1 - alpha", "= \"orange\", xlim = (cz,4)) me = cz * cise", "ndf = pd.DataFrame({\"x\":x,\"y\":y}) normp = ggplot(ndf) + coord_fixed(ratio = 4)", "- p_hat2 pq_hat = 1-pp_hat tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2)) cise =", "p-Hat\":pp_hat,\"Diff p-Hat\":dp_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test 
SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp = normp", "geom_segment(aes(x = z, y = 0, xend = z, yend", "dp_hat = p_hat1 - p_hat2 pq_hat = 1-pp_hat tsd =", "str(abs(me)) data = pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp", "str(abs(norm.ppf(alpha/2))) cl = 1 - alpha normp = normp +", "+ geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = p_hat - abs(me) upper =", "p_hat - abs(me) upper = p_hat + abs(me) st.write(str(100*cl) +", "= pd.DataFrame({\"x\":x,\"y\":y}) normp = ggplot(ndf) + coord_fixed(ratio = 4) if", "= \"steelblue\", xlim = (abs(z),4)) normp = normp + stat_function(fun", "= (-4,cz)) if tail_choice == \"Two Tails\": pv = 2*(1-norm.cdf(abs(z)))", "- alpha normp = normp + stat_function(fun = norm.pdf, geom", "= math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2) z = (p_hat1 - p_hat2)/tsd x = np.arange(-4,4,.1)", "\"area\",fill = \"steelblue\", xlim = (-4,z)) normp = normp +", "+\", \"+str(upper)+\")\") if prop_choice == \"Two Proportions\": c1,c2,c3 = st.columns(3)", "= (-4,-1*abs(cz))) normp = normp + stat_function(fun = norm.pdf, geom", "tail_choice = st.radio(\"\",[\"Left Tail\",\"Two Tails\",\"Right Tail\"]) one = st.columns(1) with", "\"area\",fill = \"orange\", xlim = (cz,4)) me = cz *", "norm.pdf, geom = \"area\",fill = \"steelblue\", xlim = (-4,z)) normp", "(z,4)) normp = normp + stat_function(fun = norm.pdf, geom =", "= math.sqrt(nullp*(1-nullp)/n) cise = math.sqrt(p_hat*(1-p_hat)/n) z = (p_hat - nullp)/tsd", "== \"Two Proportions\": c1,c2,c3 = st.columns(3) with c1: x1 =", "* import pandas as pd import numpy as np from", "= (x1+x2)/(n1+n2) dp_hat = p_hat1 - p_hat2 pq_hat = 1-pp_hat", "+ geom_segment(aes(x = z, y = 0, xend = z,", "= (abs(z),4)) normp = normp + stat_function(fun = norm.pdf, geom", "= float(st.text_input(\"Null:\",.7)) alpha = float(st.text_input(\"Alpha\",.05)) with c3: 
st.markdown(\"Pick a test:\")", "x = int(st.text_input(\"Hits\",20)) n = int(st.text_input(\"Tries\",25)) with c2: nullp =", "SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data) normp = normp + geom_segment(aes(x", "norm.cdf(z) cz = -1 * norm.ppf(alpha) rcz = cz cl", "pandas as pd import numpy as np from plotnine import", "norm.pdf, geom = \"area\",fill = \"steelblue\", xlim = (-4,-1*abs(z))) normp", "cz * cise rme = \"±\" + str(abs(me)) data =", "(\" + str(lower) +\", \"+str(upper)+\")\") if prop_choice == \"Two Proportions\":", "p_hat = x/n tsd = math.sqrt(nullp*(1-nullp)/n) cise = math.sqrt(p_hat*(1-p_hat)/n) z", "\"±\" + str(abs(me)) data = pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0])", "= \"±\" + str(abs(norm.ppf(alpha/2))) cl = 1 - alpha normp", "\"area\",fill = \"orange\", xlim = (abs(cz),4)) if tail_choice == \"Right", "= \"area\",fill = \"steelblue\", xlim = (-4,z)) normp = normp", "from plotnine import * def app(): # title of the", "x1 = int(st.text_input(\"Hits 1\",20)) n1 = int(st.text_input(\"Tries 1\",25)) with c2:", "normp + stat_function(fun = norm.pdf, geom = \"area\",fill = \"orange\",", "q_hat1 = 1 -p_hat1 p_hat2 = x2/n2 q_hat2 = 1", "normp = normp + geom_line(aes(x=x,y=y)) st.pyplot(ggplot.draw(normp)) lower = dp_hat -", "= [0]) st.write(data) normp = normp + geom_segment(aes(x = z,", "st.columns(3) with c1: x1 = int(st.text_input(\"Hits 1\",20)) n1 = int(st.text_input(\"Tries", "st.pyplot(ggplot.draw(normp)) lower = p_hat - abs(me) upper = p_hat +", "dp_hat + abs(me) st.write(str(100*cl) + \"'%' confidence interval is (\"", "cz cl = 1 - 2*alpha normp = normp +", "+ str(abs(me)) data = pd.DataFrame({\"p-Hat\":p_hat,\"z-Score\":z,\"p-Value\":pv,\"CV\":rcz,\"Test SD\":tsd,\"C-Level\":cl,\"CI SE\":cise,\"ME\":rme},index = [0]) st.write(data)", "= (-4,z)) normp = normp + stat_function(fun = norm.pdf, geom", "-p_hat1 p_hat2 = 
x2/n2 q_hat2 = 1 - p_hat2 pp_hat", "= norm.pdf, geom = \"area\",fill = \"steelblue\", xlim = (abs(z),4))", "st.columns(1) with one[0]: p_hat = x/n tsd = math.sqrt(nullp*(1-nullp)/n) cise", "+ str(abs(norm.ppf(alpha/2))) cl = 1 - alpha normp = normp", "norm.cdf(z) cz = norm.ppf(alpha) rcz = cz cl = 1", "\"area\",fill = \"steelblue\", xlim = (-4,-1*abs(z))) normp = normp +", "from scipy.stats import * import pandas as pd import numpy", "= \"area\",fill = \"orange\", xlim = (cz,4)) me = cz" ]
[ "client.v2.service_instances.list(): if cpt == 0: self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"])) self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"])) self.assertIsNotNone(client.v2.service_instances.list_permissions(instance[\"metadata\"][\"guid\"])) cpt +=", "config_test import build_client_from_configuration _logger = logging.getLogger(__name__) class TestServiceInstances(unittest.TestCase): def test_create_update_delete(self):", "test_create_update_delete(self): client = build_client_from_configuration() result = client.v2.service_instances.create(client.space_guid, \"test_name\", client.plan_guid, client.creation_parameters)", "logging.getLogger(__name__) class TestServiceInstances(unittest.TestCase): def test_create_update_delete(self): client = build_client_from_configuration() result =", "> 0: client.v2.service_instances.update(result[\"metadata\"][\"guid\"], client.update_parameters) else: _logger.warning(\"update test skipped\") client.v2.service_instances.remove(result[\"metadata\"][\"guid\"]) def", "= 0 for instance in client.v2.service_instances.list(): if cpt == 0:", "client.v2.service_instances.update(result[\"metadata\"][\"guid\"], client.update_parameters) else: _logger.warning(\"update test skipped\") client.v2.service_instances.remove(result[\"metadata\"][\"guid\"]) def test_get(self): client", "import logging import unittest from config_test import build_client_from_configuration _logger =", "== 0: self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"])) self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"])) self.assertIsNotNone(client.v2.service_instances.list_permissions(instance[\"metadata\"][\"guid\"])) cpt += 1 _logger.debug(\"test_get -", "self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"])) 
self.assertIsNotNone(client.v2.service_instances.list_permissions(instance[\"metadata\"][\"guid\"])) cpt += 1 _logger.debug(\"test_get - %d found\", cpt)", "class TestServiceInstances(unittest.TestCase): def test_create_update_delete(self): client = build_client_from_configuration() result = client.v2.service_instances.create(client.space_guid,", "0: self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"])) self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"])) self.assertIsNotNone(client.v2.service_instances.list_permissions(instance[\"metadata\"][\"guid\"])) cpt += 1 _logger.debug(\"test_get - %d", "logging import unittest from config_test import build_client_from_configuration _logger = logging.getLogger(__name__)", "client.creation_parameters) if len(client.update_parameters) > 0: client.v2.service_instances.update(result[\"metadata\"][\"guid\"], client.update_parameters) else: _logger.warning(\"update test", "= build_client_from_configuration() result = client.v2.service_instances.create(client.space_guid, \"test_name\", client.plan_guid, client.creation_parameters) if len(client.update_parameters)", "client.v2.service_instances.remove(result[\"metadata\"][\"guid\"]) def test_get(self): client = build_client_from_configuration() cpt = 0 for", "\"test_name\", client.plan_guid, client.creation_parameters) if len(client.update_parameters) > 0: client.v2.service_instances.update(result[\"metadata\"][\"guid\"], client.update_parameters) else:", "from config_test import build_client_from_configuration _logger = logging.getLogger(__name__) class TestServiceInstances(unittest.TestCase): def", "_logger = logging.getLogger(__name__) class TestServiceInstances(unittest.TestCase): def test_create_update_delete(self): client = build_client_from_configuration()", "= client.v2.service_instances.create(client.space_guid, \"test_name\", client.plan_guid, client.creation_parameters) if 
len(client.update_parameters) > 0: client.v2.service_instances.update(result[\"metadata\"][\"guid\"],", "_logger.warning(\"update test skipped\") client.v2.service_instances.remove(result[\"metadata\"][\"guid\"]) def test_get(self): client = build_client_from_configuration() cpt", "len(client.update_parameters) > 0: client.v2.service_instances.update(result[\"metadata\"][\"guid\"], client.update_parameters) else: _logger.warning(\"update test skipped\") client.v2.service_instances.remove(result[\"metadata\"][\"guid\"])", "client = build_client_from_configuration() cpt = 0 for instance in client.v2.service_instances.list():", "0 for instance in client.v2.service_instances.list(): if cpt == 0: self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"]))", "if cpt == 0: self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"])) self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"])) self.assertIsNotNone(client.v2.service_instances.list_permissions(instance[\"metadata\"][\"guid\"])) cpt += 1", "self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"])) self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"])) self.assertIsNotNone(client.v2.service_instances.list_permissions(instance[\"metadata\"][\"guid\"])) cpt += 1 _logger.debug(\"test_get - %d found\",", "else: _logger.warning(\"update test skipped\") client.v2.service_instances.remove(result[\"metadata\"][\"guid\"]) def test_get(self): client = build_client_from_configuration()", "0: client.v2.service_instances.update(result[\"metadata\"][\"guid\"], client.update_parameters) else: _logger.warning(\"update test skipped\") client.v2.service_instances.remove(result[\"metadata\"][\"guid\"]) def test_get(self):", "client.update_parameters) else: _logger.warning(\"update test skipped\") 
client.v2.service_instances.remove(result[\"metadata\"][\"guid\"]) def test_get(self): client =", "build_client_from_configuration() cpt = 0 for instance in client.v2.service_instances.list(): if cpt", "def test_create_update_delete(self): client = build_client_from_configuration() result = client.v2.service_instances.create(client.space_guid, \"test_name\", client.plan_guid,", "client = build_client_from_configuration() result = client.v2.service_instances.create(client.space_guid, \"test_name\", client.plan_guid, client.creation_parameters) if", "import unittest from config_test import build_client_from_configuration _logger = logging.getLogger(__name__) class", "cpt = 0 for instance in client.v2.service_instances.list(): if cpt ==", "instance in client.v2.service_instances.list(): if cpt == 0: self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"])) self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"])) self.assertIsNotNone(client.v2.service_instances.list_permissions(instance[\"metadata\"][\"guid\"]))", "import build_client_from_configuration _logger = logging.getLogger(__name__) class TestServiceInstances(unittest.TestCase): def test_create_update_delete(self): client", "= logging.getLogger(__name__) class TestServiceInstances(unittest.TestCase): def test_create_update_delete(self): client = build_client_from_configuration() result", "test skipped\") client.v2.service_instances.remove(result[\"metadata\"][\"guid\"]) def test_get(self): client = build_client_from_configuration() cpt =", "for instance in client.v2.service_instances.list(): if cpt == 0: self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"])) self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"]))", "TestServiceInstances(unittest.TestCase): def test_create_update_delete(self): client = build_client_from_configuration() result = 
client.v2.service_instances.create(client.space_guid, \"test_name\",", "result = client.v2.service_instances.create(client.space_guid, \"test_name\", client.plan_guid, client.creation_parameters) if len(client.update_parameters) > 0:", "test_get(self): client = build_client_from_configuration() cpt = 0 for instance in", "build_client_from_configuration() result = client.v2.service_instances.create(client.space_guid, \"test_name\", client.plan_guid, client.creation_parameters) if len(client.update_parameters) >", "unittest from config_test import build_client_from_configuration _logger = logging.getLogger(__name__) class TestServiceInstances(unittest.TestCase):", "in client.v2.service_instances.list(): if cpt == 0: self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"])) self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"])) self.assertIsNotNone(client.v2.service_instances.list_permissions(instance[\"metadata\"][\"guid\"])) cpt", "def test_get(self): client = build_client_from_configuration() cpt = 0 for instance", "client.plan_guid, client.creation_parameters) if len(client.update_parameters) > 0: client.v2.service_instances.update(result[\"metadata\"][\"guid\"], client.update_parameters) else: _logger.warning(\"update", "build_client_from_configuration _logger = logging.getLogger(__name__) class TestServiceInstances(unittest.TestCase): def test_create_update_delete(self): client =", "skipped\") client.v2.service_instances.remove(result[\"metadata\"][\"guid\"]) def test_get(self): client = build_client_from_configuration() cpt = 0", "cpt == 0: self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance[\"entity\"][\"space_guid\"])) self.assertIsNotNone(client.v2.service_instances.get(instance[\"metadata\"][\"guid\"])) self.assertIsNotNone(client.v2.service_instances.list_permissions(instance[\"metadata\"][\"guid\"])) cpt += 1 _logger.debug(\"test_get", 
"client.v2.service_instances.create(client.space_guid, \"test_name\", client.plan_guid, client.creation_parameters) if len(client.update_parameters) > 0: client.v2.service_instances.update(result[\"metadata\"][\"guid\"], client.update_parameters)", "if len(client.update_parameters) > 0: client.v2.service_instances.update(result[\"metadata\"][\"guid\"], client.update_parameters) else: _logger.warning(\"update test skipped\")", "= build_client_from_configuration() cpt = 0 for instance in client.v2.service_instances.list(): if", "<reponame>subhash12/cf-python-client<gh_stars>10-100 import logging import unittest from config_test import build_client_from_configuration _logger" ]
[]
[ "test_bv, test_application # make a parser parser = ConfigArgumentParser(description=__doc__) parser.add_argument(\"--console\",", "obj_value: %r\", obj_value) # normalize value = obj_value.value if _debug:", "if _debug: COVConsoleCmd._debug(\" - obj_value: %r\", obj_value) # normalize value", "a list of test values self.test_values = list(float(i * 10)", "found: %r\" % (object_name,)) datatype = obj.get_datatype(property_name) if _debug: COVConsoleCmd._debug(\"", "- obj_value: %r\", obj_value) # normalize value = obj_value.value if", "COVConsoleCmd._debug(\" - normalized value: %r\", value) # change the value", "def process_task(self): if _debug: TestBinaryValueTask._debug(\"process_task\") global test_bv # pop the", "type=float, help=\"analog value thread\", ) # analog value task and", "# make a device object this_device = LocalDeviceObject(ini=args.ini) if _debug:", "An instance of this class is created when '--bvthread <interval>'", "and thread parser.add_argument(\"--avtask\", type=float, help=\"analog value recurring task\", ) parser.add_argument(\"--avthread\",", "1), objectName='bv', presentValue='inactive', statusFlags=[0, 0, 0, 0], ) _log.debug(\" -", "point test_av.presentValue = next_value @bacpypes_debugging class TestAnalogValueThread(Thread): \"\"\" An instance", "test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0,", "threads enable_sleeping() # analog value task if args.avtask: test_av_task =", "_debug: COVConsoleCmd._debug(\"do_status %r\", args) global test_application # dump from the", "threading import Thread from bacpypes.debugging import bacpypes_debugging, ModuleLogger from bacpypes.consolelogging", "args = args.split() if _debug: COVConsoleCmd._debug(\"do_status %r\", args) global test_application", "datatype: raise RuntimeError(\"not a property: %r\" % (property_name,)) # toss", "test_av.presentValue = next_value # sleep time.sleep(self.interval) @bacpypes_debugging 
class TestBinaryValueTask(RecurringTask): \"\"\"", "TestBinaryValueTask._debug(\"process_task\") global test_bv # pop the next value next_value =", "console accepts commands that change the properties of an object", "line argument. Every <interval> seconds it changes the value of", "if _debug: TestAnalogValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval * 1000) #", "test_bv_task = TestBinaryValueTask(args.bvtask) test_bv_task.install_task() # binary value thread if args.bvthread:", "'--bvtask <interval>' is specified as a command line argument. Every", "= ModuleLogger(globals()) # test globals test_av = None test_bv =", "object that triggers the notifications. \"\"\" import time from threading", "sample application is a server that supports COV notification services.", "it changes the value of the test_bv present value. \"\"\"", "_debug: TestAnalogValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval * 1000) # make", "import BIPSimpleApplication from bacpypes.object import AnalogValueObject, BinaryValueObject from bacpypes.local.device import", "self.test_values = list(100.0 + float(i * 10) for i in", "args.pop(0) if _debug: COVConsoleCmd._debug(\" - object_name: %r\", object_name) if _debug:", "instance of this class is created when '--bvthread <interval>' is", "COVConsoleCmd._debug(\" - datatype: %r\", datatype) if not datatype: raise RuntimeError(\"not", "class TestAnalogValueTask(RecurringTask): \"\"\" An instance of this class is created", "the next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueThread._debug(\"", "to the device test_application.add_object(test_bv) # make a console if args.console:", "value) # see if it can be built obj_value =", ". 
] property_name [ = ] value\"\"\" args = args.split()", "obj_value = datatype(value) if _debug: COVConsoleCmd._debug(\" - obj_value: %r\", obj_value)", "ConfigArgumentParser(description=__doc__) parser.add_argument(\"--console\", action=\"store_true\", default=False, help=\"create a console\", ) # analog", "value object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0,", "ChangeOfValueServices # some debugging _debug = 0 _log = ModuleLogger(globals())", "value. \"\"\" def __init__(self, interval): if _debug: TestBinaryValueThread._debug(\"__init__ %r\", interval)", "0], ) _log.debug(\" - test_bv: %r\", test_bv) # add it", "this class is created when '--bvthread <interval>' is specified as", "the command line arguments args = parser.parse_args() if _debug: _log.debug(\"initialization\")", "%r\", property_name) obj = test_application.get_object_name(object_name) if _debug: COVConsoleCmd._debug(\" - obj:", "# change the point test_av.presentValue = next_value # sleep time.sleep(self.interval)", "%r\", this_device.objectList) # make a binary value object test_bv =", "global test_application if not args: print(\"object name required\") return obj", "_log.debug(\" - args: %r\", args) # make a device object", "- args: %r\", args) # make a device object this_device", "test_av present value. 
\"\"\" def __init__(self, interval): if _debug: TestAnalogValueThread._debug(\"__init__", "from bacpypes.consolecmd import ConsoleCmd from bacpypes.core import run, deferred, enable_sleeping", "test_application = None # # SubscribeCOVApplication # @bacpypes_debugging class SubscribeCOVApplication(BIPSimpleApplication,", "@bacpypes_debugging class COVConsoleCmd(ConsoleCmd): def do_status(self, args): \"\"\"status\"\"\" args = args.split()", "device test_application.add_object(test_bv) # make a console if args.console: test_console =", "binary value object test_bv = BinaryValueObject( objectIdentifier=('binaryValue', 1), objectName='bv', presentValue='inactive',", "the test_av present value. \"\"\" def __init__(self, interval): if _debug:", "None test_application = None # # SubscribeCOVApplication # @bacpypes_debugging class", "SubscribeCOVApplication # @bacpypes_debugging class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices): pass # # COVConsoleCmd", "%r\" % (object_name,)) datatype = obj.get_datatype(property_name) if _debug: COVConsoleCmd._debug(\" -", "a property: %r\" % (property_name,)) # toss the equals if", "= COVConsoleCmd() _log.debug(\" - test_console: %r\", test_console) # enable sleeping", "present value. 
\"\"\" def __init__(self, interval): if _debug: TestAnalogValueTask._debug(\"__init__ %r\",", "make a parser parser = ConfigArgumentParser(description=__doc__) parser.add_argument(\"--console\", action=\"store_true\", default=False, help=\"create", "Exception as err: print(\"exception: %s\" % (err,)) def do_write(self, args):", "else: property_name = args.pop(0) if _debug: COVConsoleCmd._debug(\" - object_name: %r\",", "%r\", obj) if not obj: raise RuntimeError(\"object not found: %r\"", "object_name) if _debug: COVConsoleCmd._debug(\" - property_name: %r\", property_name) obj =", "if _debug: TestAnalogValueThread._debug(\" - next_value: %r\", next_value) # change the", "make a sample application test_application = SubscribeCOVApplication(this_device, args.ini.address) # make", "= object_name.split('.') else: property_name = args.pop(0) if _debug: COVConsoleCmd._debug(\" -", "# SubscribeCOVApplication # @bacpypes_debugging class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices): pass # #", "%r\", datatype) if not datatype: raise RuntimeError(\"not a property: %r\"", "lifetime={}\".format( cov_subscription.client_addr, cov_subscription.proc_id, cov_subscription.confirmed, cov_subscription.lifetime, )) def do_trigger(self, args): \"\"\"trigger", "test_application.get_object_name(object_name) if _debug: COVConsoleCmd._debug(\" - obj: %r\", obj) if not", "args[0] == '=': args.pop(0) # evaluate the value value =", "while True: # pop the next value next_value = self.test_values.pop(0)", "float(i * 10) for i in range(10)) def run(self): if", "make a console if args.console: test_console = COVConsoleCmd() _log.debug(\" -", "task if args.avtask: test_av_task = TestAnalogValueTask(args.avtask) test_av_task.install_task() # analog value", "def __init__(self, interval): if _debug: TestBinaryValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) #", "the value of the test_bv present value. 
\"\"\" def __init__(self,", "# make a binary value object test_bv = BinaryValueObject( objectIdentifier=('binaryValue',", "obj.get_datatype(property_name) if _debug: COVConsoleCmd._debug(\" - datatype: %r\", datatype) if not", "err: print(\"exception: %s\" % (err,)) def do_write(self, args): \"\"\"write object_name", "analog value task if args.avtask: test_av_task = TestAnalogValueTask(args.avtask) test_av_task.install_task() #", "self.test_values.append(next_value) if _debug: TestAnalogValueTask._debug(\" - next_value: %r\", next_value) # change", "this_device) # make a sample application test_application = SubscribeCOVApplication(this_device, args.ini.address)", "test_bv while True: # pop the next value next_value =", "def do_status(self, args): \"\"\"status\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_status", "# make a list of test values self.test_values = list(100.0", "interval) Thread.__init__(self) # runs as a daemon self.daemon = True", "list(100.0 + float(i * 10) for i in range(10)) def", "BIPSimpleApplication from bacpypes.object import AnalogValueObject, BinaryValueObject from bacpypes.local.device import LocalDeviceObject", "analog value task and thread parser.add_argument(\"--bvtask\", type=float, help=\"binary value recurring", "statusFlags=[0, 0, 0, 0], covIncrement=1.0, ) _log.debug(\" - test_av: %r\",", "test values self.test_values = list(float(i * 10) for i in", "value) # pass it along obj.WriteProperty(property_name, value) except IndexError: print(COVConsoleCmd.do_write.__doc__)", "# @bacpypes_debugging class COVConsoleCmd(ConsoleCmd): def do_status(self, args): \"\"\"status\"\"\" args =", "0, 0], covIncrement=1.0, ) _log.debug(\" - test_av: %r\", test_av) #", "next_value @bacpypes_debugging class TestBinaryValueThread(RecurringTask, Thread): \"\"\" An instance of this", "obj_value) # normalize value = obj_value.value if _debug: COVConsoleCmd._debug(\" -", "args) global test_application try: object_name = args.pop(0) if '.' 
in", "along obj.WriteProperty(property_name, value) except IndexError: print(COVConsoleCmd.do_write.__doc__) except Exception as err:", "required\") return obj = test_application.get_object_name(args[0]) if not obj: print(\"no such", "value) # change the value setattr(obj, property_name, value) except IndexError:", "_debug: TestAnalogValueTask._debug(\"process_task\") global test_av # pop the next value next_value", "can be built obj_value = datatype(value) if _debug: COVConsoleCmd._debug(\" -", "%r\", value) # see if it can be built obj_value", "- test_bv: %r\", test_bv) # add it to the device", "accepts commands that change the properties of an object that", "property_name) obj = test_application.get_object_name(object_name) if _debug: COVConsoleCmd._debug(\" - obj: %r\",", "# make a list of test values self.test_values = list(float(i", "change the point test_bv.presentValue = next_value # sleep time.sleep(self.interval) def", "= eval(args.pop(0)) if _debug: COVConsoleCmd._debug(\" - raw value: %r\", value)", "value: %r\", value) # change the value setattr(obj, property_name, value)", "i in range(10)) def process_task(self): if _debug: TestAnalogValueTask._debug(\"process_task\") global test_av", "%r\", interval) RecurringTask.__init__(self, interval * 1000) # make a list", "\"\"\" This sample application is a server that supports COV", "# test globals test_av = None test_bv = None test_application", "dict for obj_ref, cov_detection in test_application.cov_detections.items(): print(\"{} {}\".format(obj_ref.objectIdentifier, obj_ref)) for", "= next_value # sleep time.sleep(self.interval) @bacpypes_debugging class TestBinaryValueTask(RecurringTask): \"\"\" An", "bacpypes.service.cov import ChangeOfValueServices # some debugging _debug = 0 _log", "%r\", args) # make a device object this_device = LocalDeviceObject(ini=args.ini)", "- datatype: %r\", datatype) if not datatype: raise RuntimeError(\"not a", "# provide a different spin value 
parser.add_argument(\"--spin\", type=float, help=\"spin time\",", "that supports COV notification services. The console accepts commands that", "value parser.add_argument(\"--spin\", type=float, help=\"spin time\", default=1.0, ) # parse the", "= ] value\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_set %r\",", "'--avtask <interval>' is specified as a command line argument. Every", "TestAnalogValueThread._debug(\" - next_value: %r\", next_value) # change the point test_av.presentValue", "# make a console if args.console: test_console = COVConsoleCmd() _log.debug(\"", "args: %r\", args) # make a device object this_device =", "args.pop(0) if '.' in object_name: object_name, property_name = object_name.split('.') else:", "test_bv # pop the next value next_value = self.test_values.pop(0) self.test_values.append(next_value)", "'.' in object_name: object_name, property_name = object_name.split('.') else: property_name =", "ModuleLogger from bacpypes.consolelogging import ConfigArgumentParser from bacpypes.consolecmd import ConsoleCmd from", "test_av_thread = TestAnalogValueThread(args.avthread) deferred(test_av_thread.start) # binary value task if args.bvtask:", "args.pop(0) # evaluate the value value = eval(args.pop(0)) if _debug:", "as a daemon self.daemon = True # save the interval", "= LocalDeviceObject(ini=args.ini) if _debug: _log.debug(\" - this_device: %r\", this_device) #", "make a list of test values self.test_values = [True, False]", "%r\", value) # change the value setattr(obj, property_name, value) except", "Every <interval> seconds it changes the value of the test_av", "- test_console: %r\", test_console) # enable sleeping will help with", "such object\") return # get the detection algorithm object cov_detection", "= test_application.get_object_name(args[0]) if not obj: print(\"no such object\") return #", "not datatype: raise RuntimeError(\"not a property: %r\" % (property_name,)) #", "global test_av, test_bv, test_application # make a 
parser parser =", "the value setattr(obj, property_name, value) except IndexError: print(COVConsoleCmd.do_set.__doc__) except Exception", "device object this_device = LocalDeviceObject(ini=args.ini) if _debug: _log.debug(\" - this_device:", "be built obj_value = datatype(value) if _debug: COVConsoleCmd._debug(\" - obj_value:", "if _debug: TestAnalogValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs as a", "RecurringTask.__init__(self, interval * 1000) # make a list of test", "[True, False] def run(self): if _debug: TestBinaryValueThread._debug(\"run\") global test_bv while", "__init__(self, interval): if _debug: TestBinaryValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval *", "of test values self.test_values = [True, False] def run(self): if", "for obj_ref, cov_detection in test_application.cov_detections.items(): print(\"{} {}\".format(obj_ref.objectIdentifier, obj_ref)) for cov_subscription", "is created when '--avthread <interval>' is specified as a command", "an analog value object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av',", "if _debug: TestBinaryValueTask._debug(\"process_task\") global test_bv # pop the next value", "if _debug: COVConsoleCmd._debug(\" - property_name: %r\", property_name) obj = test_application.get_object_name(object_name)", "TestBinaryValueThread(RecurringTask, Thread): \"\"\" An instance of this class is created", "True # save the interval self.interval = interval # make", "{}\".format(obj_ref.objectIdentifier, obj_ref)) for cov_subscription in cov_detection.cov_subscriptions: print(\" {} proc_id={} confirmed={}", "0, 0], ) _log.debug(\" - test_bv: %r\", test_bv) # add", "triggers the notifications. \"\"\" import time from threading import Thread", "value of the test_bv present value. \"\"\" def __init__(self, interval):", "notification services. 
The console accepts commands that change the properties", "@bacpypes_debugging class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices): pass # # COVConsoleCmd # @bacpypes_debugging", "property_name [ = ] value\"\"\" args = args.split() if _debug:", "= None # # SubscribeCOVApplication # @bacpypes_debugging class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices):", "RuntimeError(\"not a property: %r\" % (property_name,)) # toss the equals", "console if args.console: test_console = COVConsoleCmd() _log.debug(\" - test_console: %r\",", "# change the point test_av.presentValue = next_value @bacpypes_debugging class TestAnalogValueThread(Thread):", "args.split() if _debug: COVConsoleCmd._debug(\"do_trigger %r\", args) global test_application if not", "if _debug: TestBinaryValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval * 1000) #", "the test_bv present value. \"\"\" def __init__(self, interval): if _debug:", "self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueThread._debug(\" - next_value: %r\", next_value) #", "<interval> seconds it changes the value of the test_bv present", "print(\"object name required\") return obj = test_application.get_object_name(args[0]) if not obj:", "value) except IndexError: print(COVConsoleCmd.do_write.__doc__) except Exception as err: print(\"exception: %s\"", "this_device = LocalDeviceObject(ini=args.ini) if _debug: _log.debug(\" - this_device: %r\", this_device)", "%r\", test_av) # add it to the device test_application.add_object(test_av) _log.debug(\"", "the point test_av.presentValue = next_value @bacpypes_debugging class TestAnalogValueThread(Thread): \"\"\" An", "print(\"no subscriptions for that object\") return # tell it to", "bacpypes.object import AnalogValueObject, BinaryValueObject from bacpypes.local.device import LocalDeviceObject from bacpypes.service.cov", "print(\" {} proc_id={} confirmed={} lifetime={}\".format( 
cov_subscription.client_addr, cov_subscription.proc_id, cov_subscription.confirmed, cov_subscription.lifetime, ))", "next_value) # change the point test_av.presentValue = next_value @bacpypes_debugging class", "(object_name,)) datatype = obj.get_datatype(property_name) if _debug: COVConsoleCmd._debug(\" - datatype: %r\",", "bacpypes.debugging import bacpypes_debugging, ModuleLogger from bacpypes.consolelogging import ConfigArgumentParser from bacpypes.consolecmd", "BinaryValueObject( objectIdentifier=('binaryValue', 1), objectName='bv', presentValue='inactive', statusFlags=[0, 0, 0, 0], )", "the device test_application.add_object(test_bv) # make a console if args.console: test_console", "command line argument. Every <interval> seconds it changes the value", "_debug: COVConsoleCmd._debug(\" - obj_value: %r\", obj_value) # normalize value =", "if _debug: COVConsoleCmd._debug(\"do_set %r\", args) global test_application try: object_name =", "obj: raise RuntimeError(\"object not found: %r\" % (object_name,)) datatype =", "if _debug: TestAnalogValueTask._debug(\"process_task\") global test_av # pop the next value", "_debug: TestBinaryValueTask._debug(\"process_task\") global test_bv # pop the next value next_value", "Thread from bacpypes.debugging import bacpypes_debugging, ModuleLogger from bacpypes.consolelogging import ConfigArgumentParser", "__init__(self, interval): if _debug: TestBinaryValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs", "[True, False] def process_task(self): if _debug: TestBinaryValueTask._debug(\"process_task\") global test_bv #", "test_console = COVConsoleCmd() _log.debug(\" - test_console: %r\", test_console) # enable", "TestBinaryValueThread._debug(\" - next_value: %r\", next_value) # change the point test_bv.presentValue", "help=\"create a console\", ) # analog value task and thread", "SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices): pass # # COVConsoleCmd # @bacpypes_debugging class 
COVConsoleCmd(ConsoleCmd):", "# change the point test_bv.presentValue = next_value # sleep time.sleep(self.interval)", "LocalDeviceObject(ini=args.ini) if _debug: _log.debug(\" - this_device: %r\", this_device) # make", "obj_ref, cov_detection in test_application.cov_detections.items(): print(\"{} {}\".format(obj_ref.objectIdentifier, obj_ref)) for cov_subscription in", "name required\") return obj = test_application.get_object_name(args[0]) if not obj: print(\"no", "== '=': args.pop(0) # evaluate the value value = eval(args.pop(0))", "thread\", ) # provide a different spin value parser.add_argument(\"--spin\", type=float,", "thread parser.add_argument(\"--bvtask\", type=float, help=\"binary value recurring task\", ) parser.add_argument(\"--bvthread\", type=float,", "class COVConsoleCmd(ConsoleCmd): def do_status(self, args): \"\"\"status\"\"\" args = args.split() if", "do_status(self, args): \"\"\"status\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_status %r\",", "properties of an object that triggers the notifications. 
\"\"\" import", "if _debug: COVConsoleCmd._debug(\"do_trigger %r\", args) global test_application if not args:", "An instance of this class is created when '--avtask <interval>'", "the next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueTask._debug(\"", "_log.debug(\" - this_device: %r\", this_device) # make a sample application", "binary value task if args.bvtask: test_bv_task = TestBinaryValueTask(args.bvtask) test_bv_task.install_task() #", "%r\", value) # pass it along obj.WriteProperty(property_name, value) except IndexError:", "= obj.get_datatype(property_name) if _debug: COVConsoleCmd._debug(\" - datatype: %r\", datatype) if", "confirmed={} lifetime={}\".format( cov_subscription.client_addr, cov_subscription.proc_id, cov_subscription.confirmed, cov_subscription.lifetime, )) def do_trigger(self, args):", "runs as a daemon self.daemon = True # save the", "values self.test_values = [True, False] def run(self): if _debug: TestBinaryValueThread._debug(\"run\")", "import RecurringTask from bacpypes.app import BIPSimpleApplication from bacpypes.object import AnalogValueObject,", "value task and thread parser.add_argument(\"--bvtask\", type=float, help=\"binary value recurring task\",", "server that supports COV notification services. The console accepts commands", "= args.split() if _debug: COVConsoleCmd._debug(\"do_status %r\", args) global test_application #", "self.test_values = [True, False] def process_task(self): if _debug: TestBinaryValueTask._debug(\"process_task\") global", "= next_value @bacpypes_debugging class TestBinaryValueThread(RecurringTask, Thread): \"\"\" An instance of", "obj: %r\", obj) if not obj: raise RuntimeError(\"object not found:", "to send out notifications cov_detection.send_cov_notifications() def do_set(self, args): \"\"\"set object_name", "object_name [ . 
] property_name [ = ] value\"\"\" args", "range(10)) def run(self): if _debug: TestAnalogValueThread._debug(\"run\") global test_av while True:", "TestBinaryValueTask._debug(\" - next_value: %r\", next_value) # change the point test_bv.presentValue", "== 0): print(\"no subscriptions for that object\") return # tell", "if args[0] == '=': args.pop(0) # evaluate the value value", "enable_sleeping from bacpypes.task import RecurringTask from bacpypes.app import BIPSimpleApplication from", "self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueTask._debug(\" - next_value: %r\", next_value) #", "value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueTask._debug(\" - next_value:", "ChangeOfValueServices): pass # # COVConsoleCmd # @bacpypes_debugging class COVConsoleCmd(ConsoleCmd): def", "def main(): global test_av, test_bv, test_application # make a parser", "def run(self): if _debug: TestAnalogValueThread._debug(\"run\") global test_av while True: #", "test_bv = None test_application = None # # SubscribeCOVApplication #", "next_value: %r\", next_value) # change the point test_av.presentValue = next_value", "task and thread parser.add_argument(\"--bvtask\", type=float, help=\"binary value recurring task\", )", "TestAnalogValueTask._debug(\" - next_value: %r\", next_value) # change the point test_av.presentValue", "%r\", next_value) # change the point test_av.presentValue = next_value @bacpypes_debugging", "if _debug: TestBinaryValueTask._debug(\" - next_value: %r\", next_value) # change the", "print(\"no such object\") return # get the detection algorithm object", "= ConfigArgumentParser(description=__doc__) parser.add_argument(\"--console\", action=\"store_true\", default=False, help=\"create a console\", ) #", "created when '--avthread <interval>' is specified as a command line", "1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=1.0, ) _log.debug(\"", 
"algorithm object cov_detection = test_application.cov_detections.get(obj, None) if (not cov_detection) or", "args): \"\"\"trigger object_name\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_trigger %r\",", "COV notification services. The console accepts commands that change the", "0 _log = ModuleLogger(globals()) # test globals test_av = None", "of the test_av present value. \"\"\" def __init__(self, interval): if", "= parser.parse_args() if _debug: _log.debug(\"initialization\") if _debug: _log.debug(\" - args:", "COVConsoleCmd._debug(\" - object_name: %r\", object_name) if _debug: COVConsoleCmd._debug(\" - property_name:", "cov_detection in test_application.cov_detections.items(): print(\"{} {}\".format(obj_ref.objectIdentifier, obj_ref)) for cov_subscription in cov_detection.cov_subscriptions:", "test_bv) # add it to the device test_application.add_object(test_bv) # make", "property [ = ] value\"\"\" args = args.split() if _debug:", "= obj_value.value if _debug: COVConsoleCmd._debug(\" - normalized value: %r\", value)", "property_name = object_name.split('.') else: property_name = args.pop(0) if _debug: COVConsoleCmd._debug(\"", "RuntimeError(\"object not found: %r\" % (object_name,)) datatype = obj.get_datatype(property_name) if", "it changes the value of the test_av present value. 
\"\"\"", "_debug: TestAnalogValueThread._debug(\" - next_value: %r\", next_value) # change the point", ") # parse the command line arguments args = parser.parse_args()", "if args.console: test_console = COVConsoleCmd() _log.debug(\" - test_console: %r\", test_console)", "some debugging _debug = 0 _log = ModuleLogger(globals()) # test", "_debug = 0 _log = ModuleLogger(globals()) # test globals test_av", "value thread\", ) # analog value task and thread parser.add_argument(\"--bvtask\",", "arguments args = parser.parse_args() if _debug: _log.debug(\"initialization\") if _debug: _log.debug(\"", "of this class is created when '--bvthread <interval>' is specified", "test_application.add_object(test_av) _log.debug(\" - object list: %r\", this_device.objectList) # make a", "make an analog value object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1),", "test_application.cov_detections.items(): print(\"{} {}\".format(obj_ref.objectIdentifier, obj_ref)) for cov_subscription in cov_detection.cov_subscriptions: print(\" {}", "value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueTask._debug(\" - next_value:", "# add it to the device test_application.add_object(test_bv) # make a", "except Exception as err: print(\"exception: %s\" % (err,)) @bacpypes_debugging class", "COVConsoleCmd._debug(\" - obj_value: %r\", obj_value) # normalize value = obj_value.value", "next_value) # change the point test_av.presentValue = next_value # sleep", "test_application # dump from the COV detections dict for obj_ref,", "of this class is created when '--avthread <interval>' is specified", "that change the properties of an object that triggers the", "TestAnalogValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval * 1000) # make a", "args.avtask: test_av_task = TestAnalogValueTask(args.avtask) test_av_task.install_task() # analog value thread if", "def do_trigger(self, args): \"\"\"trigger 
object_name\"\"\" args = args.split() if _debug:", "value = obj_value.value if _debug: COVConsoleCmd._debug(\" - normalized value: %r\",", "is a server that supports COV notification services. The console", "the value of the test_av present value. \"\"\" def __init__(self,", "the notifications. \"\"\" import time from threading import Thread from", "when '--bvtask <interval>' is specified as a command line argument.", "import run, deferred, enable_sleeping from bacpypes.task import RecurringTask from bacpypes.app", "# change the point test_bv.presentValue = next_value @bacpypes_debugging class TestBinaryValueThread(RecurringTask,", "change the point test_av.presentValue = next_value # sleep time.sleep(self.interval) @bacpypes_debugging", "Thread.__init__(self) # runs as a daemon self.daemon = True #", "if _debug: _log.debug(\" - this_device: %r\", this_device) # make a", "test_bv present value. \"\"\" def __init__(self, interval): if _debug: TestBinaryValueTask._debug(\"__init__", "_log = ModuleLogger(globals()) # test globals test_av = None test_bv", "value thread if args.avthread: test_av_thread = TestAnalogValueThread(args.avthread) deferred(test_av_thread.start) # binary", "_debug: TestBinaryValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval * 1000) # save", "_debug: TestBinaryValueTask._debug(\" - next_value: %r\", next_value) # change the point", "test_bv.presentValue = next_value @bacpypes_debugging class TestBinaryValueThread(RecurringTask, Thread): \"\"\" An instance", "thread parser.add_argument(\"--avtask\", type=float, help=\"analog value recurring task\", ) parser.add_argument(\"--avthread\", type=float,", "presentValue='inactive', statusFlags=[0, 0, 0, 0], ) _log.debug(\" - test_bv: %r\",", "\"\"\" def __init__(self, interval): if _debug: TestBinaryValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self,", "_debug: COVConsoleCmd._debug(\" - object_name: %r\", object_name) if _debug: 
COVConsoleCmd._debug(\" -", "= next_value # sleep time.sleep(self.interval) def main(): global test_av, test_bv,", "sleeping will help with threads enable_sleeping() # analog value task", "# @bacpypes_debugging class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices): pass # # COVConsoleCmd #", "datatype) if not datatype: raise RuntimeError(\"not a property: %r\" %", "test_bv.presentValue = next_value # sleep time.sleep(self.interval) def main(): global test_av,", "if _debug: _log.debug(\" - args: %r\", args) # make a", "if args.bvthread: test_bv_thread = TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start) _log.debug(\"running\") run(args.spin) _log.debug(\"fini\") if", "from bacpypes.service.cov import ChangeOfValueServices # some debugging _debug = 0", "cov_detection.send_cov_notifications() def do_set(self, args): \"\"\"set object_name [ . ] property_name", "interval): if _debug: TestAnalogValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs as", "test_av # pop the next value next_value = self.test_values.pop(0) self.test_values.append(next_value)", "None # # SubscribeCOVApplication # @bacpypes_debugging class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices): pass", "value setattr(obj, property_name, value) except IndexError: print(COVConsoleCmd.do_set.__doc__) except Exception as", "a sample application test_application = SubscribeCOVApplication(this_device, args.ini.address) # make an", "# add it to the device test_application.add_object(test_av) _log.debug(\" - object", "- object list: %r\", this_device.objectList) # make a binary value", "application test_application = SubscribeCOVApplication(this_device, args.ini.address) # make an analog value", "value = eval(args.pop(0)) if _debug: COVConsoleCmd._debug(\" - raw value: %r\",", "cov_subscription.lifetime, )) def do_trigger(self, args): \"\"\"trigger object_name\"\"\" args = args.split()", "value) except IndexError: 
print(COVConsoleCmd.do_set.__doc__) except Exception as err: print(\"exception: %s\"", "it to the device test_application.add_object(test_av) _log.debug(\" - object list: %r\",", "of this class is created when '--avtask <interval>' is specified", "_debug: TestBinaryValueThread._debug(\" - next_value: %r\", next_value) # change the point", "if not args: print(\"object name required\") return obj = test_application.get_object_name(args[0])", "a server that supports COV notification services. The console accepts", "%r\", object_name) if _debug: COVConsoleCmd._debug(\" - property_name: %r\", property_name) obj", "add it to the device test_application.add_object(test_bv) # make a console", "a different spin value parser.add_argument(\"--spin\", type=float, help=\"spin time\", default=1.0, )", "this_device: %r\", this_device) # make a sample application test_application =", "change the point test_bv.presentValue = next_value @bacpypes_debugging class TestBinaryValueThread(RecurringTask, Thread):", "0, 0, 0], covIncrement=1.0, ) _log.debug(\" - test_av: %r\", test_av)", "obj = test_application.get_object_name(object_name) if _debug: COVConsoleCmd._debug(\" - obj: %r\", obj)", "COVConsoleCmd._debug(\" - property_name: %r\", property_name) obj = test_application.get_object_name(object_name) if _debug:", "property_name: %r\", property_name) obj = test_application.get_object_name(object_name) if _debug: COVConsoleCmd._debug(\" -", "ConsoleCmd from bacpypes.core import run, deferred, enable_sleeping from bacpypes.task import", "interval): if _debug: TestBinaryValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval * 1000)", "it to send out notifications cov_detection.send_cov_notifications() def do_set(self, args): \"\"\"set", "= self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueTask._debug(\" - next_value: %r\", next_value)", "default=1.0, ) # parse the command line arguments args =", "help=\"binary value thread\", 
) # provide a different spin value", "provide a different spin value parser.add_argument(\"--spin\", type=float, help=\"spin time\", default=1.0,", "from bacpypes.consolelogging import ConfigArgumentParser from bacpypes.consolecmd import ConsoleCmd from bacpypes.core", "RecurringTask from bacpypes.app import BIPSimpleApplication from bacpypes.object import AnalogValueObject, BinaryValueObject", "thread if args.avthread: test_av_thread = TestAnalogValueThread(args.avthread) deferred(test_av_thread.start) # binary value", "changes the value of the test_av present value. \"\"\" def", "- normalized value: %r\", value) # pass it along obj.WriteProperty(property_name,", "next_value) # change the point test_bv.presentValue = next_value @bacpypes_debugging class", "test_application # make a parser parser = ConfigArgumentParser(description=__doc__) parser.add_argument(\"--console\", action=\"store_true\",", "args) # make a device object this_device = LocalDeviceObject(ini=args.ini) if", "COV detections dict for obj_ref, cov_detection in test_application.cov_detections.items(): print(\"{} {}\".format(obj_ref.objectIdentifier,", "# enable sleeping will help with threads enable_sleeping() # analog", "process_task(self): if _debug: TestBinaryValueTask._debug(\"process_task\") global test_bv # pop the next", "print(\"exception: %s\" % (err,)) def do_write(self, args): \"\"\"write object_name [", "bacpypes.task import RecurringTask from bacpypes.app import BIPSimpleApplication from bacpypes.object import", "import bacpypes_debugging, ModuleLogger from bacpypes.consolelogging import ConfigArgumentParser from bacpypes.consolecmd import", "see if it can be built obj_value = datatype(value) if", "= self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueTask._debug(\" - next_value: %r\", next_value)", "@bacpypes_debugging class TestAnalogValueThread(Thread): \"\"\" An instance of this class is", "list of test values self.test_values = [True, False] def 
run(self):", "interval): if _debug: TestBinaryValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs as", "presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=1.0, ) _log.debug(\" - test_av:", ")) def do_trigger(self, args): \"\"\"trigger object_name\"\"\" args = args.split() if", "detection algorithm object cov_detection = test_application.cov_detections.get(obj, None) if (not cov_detection)", "TestBinaryValueTask(RecurringTask): \"\"\" An instance of this class is created when", "parser.add_argument(\"--avthread\", type=float, help=\"analog value thread\", ) # analog value task", "change the value setattr(obj, property_name, value) except IndexError: print(COVConsoleCmd.do_set.__doc__) except", "time.sleep(self.interval) def main(): global test_av, test_bv, test_application # make a", "_debug: COVConsoleCmd._debug(\" - raw value: %r\", value) # see if", "for cov_subscription in cov_detection.cov_subscriptions: print(\" {} proc_id={} confirmed={} lifetime={}\".format( cov_subscription.client_addr,", "return # get the detection algorithm object cov_detection = test_application.cov_detections.get(obj,", "_debug: TestAnalogValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs as a daemon", "= [True, False] def run(self): if _debug: TestBinaryValueThread._debug(\"run\") global test_bv", "def do_set(self, args): \"\"\"set object_name [ . 
] property_name [", "# get the detection algorithm object cov_detection = test_application.cov_detections.get(obj, None)", "# # COVConsoleCmd # @bacpypes_debugging class COVConsoleCmd(ConsoleCmd): def do_status(self, args):", "COVConsoleCmd() _log.debug(\" - test_console: %r\", test_console) # enable sleeping will", "interval * 1000) # make a list of test values", "object_name: object_name, property_name = object_name.split('.') else: property_name = args.pop(0) if", "global test_bv while True: # pop the next value next_value", "] property [ = ] value\"\"\" args = args.split() if", "evaluate the value value = eval(args.pop(0)) if _debug: COVConsoleCmd._debug(\" -", "analog value object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0,", "self.daemon = True # save the interval self.interval = interval", "recurring task\", ) parser.add_argument(\"--bvthread\", type=float, help=\"binary value thread\", ) #", "the next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueThread._debug(\"", "class is created when '--avthread <interval>' is specified as a", "not args: print(\"object name required\") return obj = test_application.get_object_name(args[0]) if", "False] def process_task(self): if _debug: TestBinaryValueTask._debug(\"process_task\") global test_bv # pop", "property_name, value) except IndexError: print(COVConsoleCmd.do_set.__doc__) except Exception as err: print(\"exception:", "# make a list of test values self.test_values = [True,", "different spin value parser.add_argument(\"--spin\", type=float, help=\"spin time\", default=1.0, ) #", "help with threads enable_sleeping() # analog value task if args.avtask:", "normalize value = obj_value.value if _debug: COVConsoleCmd._debug(\" - normalized value:", "with threads enable_sleeping() # analog value task if args.avtask: test_av_task", "list of test values self.test_values = list(100.0 + float(i *", "commands 
that change the properties of an object that triggers", "cov_subscription.confirmed, cov_subscription.lifetime, )) def do_trigger(self, args): \"\"\"trigger object_name\"\"\" args =", "* 10) for i in range(10)) def run(self): if _debug:", "from bacpypes.task import RecurringTask from bacpypes.app import BIPSimpleApplication from bacpypes.object", "# change the value setattr(obj, property_name, value) except IndexError: print(COVConsoleCmd.do_set.__doc__)", "a parser parser = ConfigArgumentParser(description=__doc__) parser.add_argument(\"--console\", action=\"store_true\", default=False, help=\"create a", "interval): if _debug: TestAnalogValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval * 1000)", "objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=1.0, )", "# binary value thread if args.bvthread: test_bv_thread = TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start)", "action=\"store_true\", default=False, help=\"create a console\", ) # analog value task", "cov_subscription.client_addr, cov_subscription.proc_id, cov_subscription.confirmed, cov_subscription.lifetime, )) def do_trigger(self, args): \"\"\"trigger object_name\"\"\"", "import time from threading import Thread from bacpypes.debugging import bacpypes_debugging,", "datatype = obj.get_datatype(property_name) if _debug: COVConsoleCmd._debug(\" - datatype: %r\", datatype)", "dump from the COV detections dict for obj_ref, cov_detection in", "cov_detection) or (len(cov_detection.cov_subscriptions) == 0): print(\"no subscriptions for that object\")", "] value\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_set %r\", args)", "not obj: raise RuntimeError(\"object not found: %r\" % (object_name,)) datatype", "- this_device: %r\", this_device) # make a sample application test_application", "console\", ) # analog value task and thread parser.add_argument(\"--avtask\", type=float,", "tell it to send 
out notifications cov_detection.send_cov_notifications() def do_set(self, args):", "bacpypes_debugging, ModuleLogger from bacpypes.consolelogging import ConfigArgumentParser from bacpypes.consolecmd import ConsoleCmd", "test values self.test_values = list(100.0 + float(i * 10) for", "test_av present value. \"\"\" def __init__(self, interval): if _debug: TestAnalogValueTask._debug(\"__init__", "(err,)) def do_write(self, args): \"\"\"write object_name [ . ] property", "__init__(self, interval): if _debug: TestAnalogValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval *", "% (object_name,)) datatype = obj.get_datatype(property_name) if _debug: COVConsoleCmd._debug(\" - datatype:", "COVConsoleCmd._debug(\" - normalized value: %r\", value) # pass it along", "= TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start) _log.debug(\"running\") run(args.spin) _log.debug(\"fini\") if __name__ == \"__main__\":", "from bacpypes.local.device import LocalDeviceObject from bacpypes.service.cov import ChangeOfValueServices # some", "[ . 
] property_name [ = ] value\"\"\" args =", "test values self.test_values = [True, False] def run(self): if _debug:", "self.test_values.append(next_value) if _debug: TestBinaryValueThread._debug(\" - next_value: %r\", next_value) # change", "parser = ConfigArgumentParser(description=__doc__) parser.add_argument(\"--console\", action=\"store_true\", default=False, help=\"create a console\", )", "as err: print(\"exception: %s\" % (err,)) @bacpypes_debugging class TestAnalogValueTask(RecurringTask): \"\"\"", "pass # # COVConsoleCmd # @bacpypes_debugging class COVConsoleCmd(ConsoleCmd): def do_status(self,", "except IndexError: print(COVConsoleCmd.do_set.__doc__) except Exception as err: print(\"exception: %s\" %", "run(self): if _debug: TestAnalogValueThread._debug(\"run\") global test_av while True: # pop", "if args.bvtask: test_bv_task = TestBinaryValueTask(args.bvtask) test_bv_task.install_task() # binary value thread", "args) global test_application if not args: print(\"object name required\") return", "TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start) _log.debug(\"running\") run(args.spin) _log.debug(\"fini\") if __name__ == \"__main__\": main()", "the COV detections dict for obj_ref, cov_detection in test_application.cov_detections.items(): print(\"{}", "# analog value task if args.avtask: test_av_task = TestAnalogValueTask(args.avtask) test_av_task.install_task()", "TestAnalogValueTask._debug(\"process_task\") global test_av # pop the next value next_value =", "print(COVConsoleCmd.do_write.__doc__) except Exception as err: print(\"exception: %s\" % (err,)) @bacpypes_debugging", "global test_av # pop the next value next_value = self.test_values.pop(0)", "if it can be built obj_value = datatype(value) if _debug:", "next_value: %r\", next_value) # change the point test_bv.presentValue = next_value", "test_av_task.install_task() # analog value thread if args.avthread: test_av_thread = TestAnalogValueThread(args.avthread)", "task\", ) 
parser.add_argument(\"--avthread\", type=float, help=\"analog value thread\", ) # analog", "self.test_values.append(next_value) if _debug: TestAnalogValueThread._debug(\" - next_value: %r\", next_value) # change", "test_application try: object_name = args.pop(0) if '.' in object_name: object_name,", "if _debug: TestBinaryValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs as a", "parser.parse_args() if _debug: _log.debug(\"initialization\") if _debug: _log.debug(\" - args: %r\",", "normalized value: %r\", value) # change the value setattr(obj, property_name,", "_debug: TestAnalogValueTask._debug(\" - next_value: %r\", next_value) # change the point", "for i in range(10)) def run(self): if _debug: TestAnalogValueThread._debug(\"run\") global", "cov_subscription.proc_id, cov_subscription.confirmed, cov_subscription.lifetime, )) def do_trigger(self, args): \"\"\"trigger object_name\"\"\" args", "time\", default=1.0, ) # parse the command line arguments args", "args.avthread: test_av_thread = TestAnalogValueThread(args.avthread) deferred(test_av_thread.start) # binary value task if", "it along obj.WriteProperty(property_name, value) except IndexError: print(COVConsoleCmd.do_write.__doc__) except Exception as", "type=float, help=\"binary value recurring task\", ) parser.add_argument(\"--bvthread\", type=float, help=\"binary value", "device test_application.add_object(test_av) _log.debug(\" - object list: %r\", this_device.objectList) # make", "\"\"\"set object_name [ . 
] property_name [ = ] value\"\"\"", "parser parser = ConfigArgumentParser(description=__doc__) parser.add_argument(\"--console\", action=\"store_true\", default=False, help=\"create a console\",", "= next_value @bacpypes_debugging class TestAnalogValueThread(Thread): \"\"\" An instance of this", "(err,)) @bacpypes_debugging class TestAnalogValueTask(RecurringTask): \"\"\" An instance of this class", "= None test_bv = None test_application = None # #", "_debug: COVConsoleCmd._debug(\" - normalized value: %r\", value) # pass it", "_debug: COVConsoleCmd._debug(\" - normalized value: %r\", value) # change the", "= args.pop(0) if '.' in object_name: object_name, property_name = object_name.split('.')", "binary value thread if args.bvthread: test_bv_thread = TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start) _log.debug(\"running\")", "_debug: TestAnalogValueThread._debug(\"run\") global test_av while True: # pop the next", "present value. \"\"\" def __init__(self, interval): if _debug: TestAnalogValueThread._debug(\"__init__ %r\",", "def __init__(self, interval): if _debug: TestAnalogValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) #", "# analog value task and thread parser.add_argument(\"--avtask\", type=float, help=\"analog value", "value task and thread parser.add_argument(\"--avtask\", type=float, help=\"analog value recurring task\",", "COVConsoleCmd._debug(\" - raw value: %r\", value) # see if it", "+ float(i * 10) for i in range(10)) def run(self):", "def __init__(self, interval): if _debug: TestBinaryValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval", "args): \"\"\"status\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_status %r\", args)", "next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueThread._debug(\" - next_value: %r\",", "import LocalDeviceObject from bacpypes.service.cov import ChangeOfValueServices # some debugging _debug", 
"obj = test_application.get_object_name(args[0]) if not obj: print(\"no such object\") return", "obj_value.value if _debug: COVConsoleCmd._debug(\" - normalized value: %r\", value) #", "print(COVConsoleCmd.do_set.__doc__) except Exception as err: print(\"exception: %s\" % (err,)) def", "self.test_values = [True, False] def run(self): if _debug: TestBinaryValueThread._debug(\"run\") global", "help=\"analog value recurring task\", ) parser.add_argument(\"--avthread\", type=float, help=\"analog value thread\",", ") _log.debug(\" - test_bv: %r\", test_bv) # add it to", "args): \"\"\"set object_name [ . ] property_name [ = ]", "test_application.add_object(test_bv) # make a console if args.console: test_console = COVConsoleCmd()", "object\") return # tell it to send out notifications cov_detection.send_cov_notifications()", "notifications cov_detection.send_cov_notifications() def do_set(self, args): \"\"\"set object_name [ . ]", "instance of this class is created when '--bvtask <interval>' is", ") # provide a different spin value parser.add_argument(\"--spin\", type=float, help=\"spin", "covIncrement=1.0, ) _log.debug(\" - test_av: %r\", test_av) # add it", "to the device test_application.add_object(test_av) _log.debug(\" - object list: %r\", this_device.objectList)", "@bacpypes_debugging class TestAnalogValueTask(RecurringTask): \"\"\" An instance of this class is", "TestAnalogValueThread._debug(\"run\") global test_av while True: # pop the next value", "\"\"\" An instance of this class is created when '--bvtask", "- normalized value: %r\", value) # change the value setattr(obj,", "An instance of this class is created when '--avthread <interval>'", "sample application test_application = SubscribeCOVApplication(this_device, args.ini.address) # make an analog", "from bacpypes.debugging import bacpypes_debugging, ModuleLogger from bacpypes.consolelogging import ConfigArgumentParser from", "import Thread from bacpypes.debugging import bacpypes_debugging, ModuleLogger from 
bacpypes.consolelogging import", "property: %r\" % (property_name,)) # toss the equals if args[0]", "%r\", next_value) # change the point test_bv.presentValue = next_value #", "line arguments args = parser.parse_args() if _debug: _log.debug(\"initialization\") if _debug:", "0], covIncrement=1.0, ) _log.debug(\" - test_av: %r\", test_av) # add", "An instance of this class is created when '--bvtask <interval>'", "obj.WriteProperty(property_name, value) except IndexError: print(COVConsoleCmd.do_write.__doc__) except Exception as err: print(\"exception:", "= list(float(i * 10) for i in range(10)) def process_task(self):", "\"\"\" An instance of this class is created when '--bvthread", "object_name = args.pop(0) if '.' in object_name: object_name, property_name =", "created when '--avtask <interval>' is specified as a command line", "(property_name,)) # toss the equals if args[0] == '=': args.pop(0)", "from the COV detections dict for obj_ref, cov_detection in test_application.cov_detections.items():", "test_bv = BinaryValueObject( objectIdentifier=('binaryValue', 1), objectName='bv', presentValue='inactive', statusFlags=[0, 0, 0,", "None test_bv = None test_application = None # # SubscribeCOVApplication", "object\") return # get the detection algorithm object cov_detection =", "if _debug: COVConsoleCmd._debug(\" - object_name: %r\", object_name) if _debug: COVConsoleCmd._debug(\"", "an object that triggers the notifications. \"\"\" import time from", "COVConsoleCmd._debug(\"do_status %r\", args) global test_application # dump from the COV", "do_write(self, args): \"\"\"write object_name [ . 
] property [ =", "is created when '--bvtask <interval>' is specified as a command", "or (len(cov_detection.cov_subscriptions) == 0): print(\"no subscriptions for that object\") return", "value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueThread._debug(\" - next_value:", "%r\", interval) RecurringTask.__init__(self, interval * 1000) # save the interval", "values self.test_values = [True, False] def process_task(self): if _debug: TestBinaryValueTask._debug(\"process_task\")", "_debug: _log.debug(\" - args: %r\", args) # make a device", "%r\", test_console) # enable sleeping will help with threads enable_sleeping()", "cov_detection = test_application.cov_detections.get(obj, None) if (not cov_detection) or (len(cov_detection.cov_subscriptions) ==", "parser.add_argument(\"--spin\", type=float, help=\"spin time\", default=1.0, ) # parse the command", "args: print(\"object name required\") return obj = test_application.get_object_name(args[0]) if not", "supports COV notification services. 
The console accepts commands that change", "class TestAnalogValueThread(Thread): \"\"\" An instance of this class is created", "args.bvtask: test_bv_task = TestBinaryValueTask(args.bvtask) test_bv_task.install_task() # binary value thread if", "global test_application # dump from the COV detections dict for", "task and thread parser.add_argument(\"--avtask\", type=float, help=\"analog value recurring task\", )", "value: %r\", value) # see if it can be built", "COVConsoleCmd(ConsoleCmd): def do_status(self, args): \"\"\"status\"\"\" args = args.split() if _debug:", "LocalDeviceObject from bacpypes.service.cov import ChangeOfValueServices # some debugging _debug =", "out notifications cov_detection.send_cov_notifications() def do_set(self, args): \"\"\"set object_name [ .", "%s\" % (err,)) def do_write(self, args): \"\"\"write object_name [ .", "if _debug: TestBinaryValueThread._debug(\" - next_value: %r\", next_value) # change the", "in object_name: object_name, property_name = object_name.split('.') else: property_name = args.pop(0)", "%s\" % (err,)) @bacpypes_debugging class TestAnalogValueTask(RecurringTask): \"\"\" An instance of", ") parser.add_argument(\"--bvthread\", type=float, help=\"binary value thread\", ) # provide a", "global test_bv # pop the next value next_value = self.test_values.pop(0)", "specified as a command line argument. Every <interval> seconds it", "next_value) # change the point test_bv.presentValue = next_value # sleep", "%r\", next_value) # change the point test_bv.presentValue = next_value @bacpypes_debugging", "of an object that triggers the notifications. 
\"\"\" import time", "# pass it along obj.WriteProperty(property_name, value) except IndexError: print(COVConsoleCmd.do_write.__doc__) except", ") # analog value task and thread parser.add_argument(\"--bvtask\", type=float, help=\"binary", "args.split() if _debug: COVConsoleCmd._debug(\"do_status %r\", args) global test_application # dump", "object this_device = LocalDeviceObject(ini=args.ini) if _debug: _log.debug(\" - this_device: %r\",", "_debug: COVConsoleCmd._debug(\"do_set %r\", args) global test_application try: object_name = args.pop(0)", "__init__(self, interval): if _debug: TestAnalogValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs", "help=\"spin time\", default=1.0, ) # parse the command line arguments", "_log.debug(\"initialization\") if _debug: _log.debug(\" - args: %r\", args) # make", "pass it along obj.WriteProperty(property_name, value) except IndexError: print(COVConsoleCmd.do_write.__doc__) except Exception", "import AnalogValueObject, BinaryValueObject from bacpypes.local.device import LocalDeviceObject from bacpypes.service.cov import", "# evaluate the value value = eval(args.pop(0)) if _debug: COVConsoleCmd._debug(\"", "normalized value: %r\", value) # pass it along obj.WriteProperty(property_name, value)", "# make a parser parser = ConfigArgumentParser(description=__doc__) parser.add_argument(\"--console\", action=\"store_true\", default=False,", "<interval> seconds it changes the value of the test_av present", "_log.debug(\" - test_av: %r\", test_av) # add it to the", "_log.debug(\" - test_console: %r\", test_console) # enable sleeping will help", "test_console) # enable sleeping will help with threads enable_sleeping() #", "% (err,)) @bacpypes_debugging class TestAnalogValueTask(RecurringTask): \"\"\" An instance of this", "sleep time.sleep(self.interval) def main(): global test_av, test_bv, test_application # make", "_debug: _log.debug(\" - this_device: %r\", this_device) # make a sample", "deferred, 
enable_sleeping from bacpypes.task import RecurringTask from bacpypes.app import BIPSimpleApplication", "interval) RecurringTask.__init__(self, interval * 1000) # save the interval self.interval", "TestAnalogValueTask(args.avtask) test_av_task.install_task() # analog value thread if args.avthread: test_av_thread =", "%r\", next_value) # change the point test_av.presentValue = next_value #", "parser.add_argument(\"--console\", action=\"store_true\", default=False, help=\"create a console\", ) # analog value", "def do_write(self, args): \"\"\"write object_name [ . ] property [", "the point test_av.presentValue = next_value # sleep time.sleep(self.interval) @bacpypes_debugging class", "print(\"{} {}\".format(obj_ref.objectIdentifier, obj_ref)) for cov_subscription in cov_detection.cov_subscriptions: print(\" {} proc_id={}", "object_name [ . ] property [ = ] value\"\"\" args", "not obj: print(\"no such object\") return # get the detection", "if _debug: TestAnalogValueTask._debug(\" - next_value: %r\", next_value) # change the", "try: object_name = args.pop(0) if '.' in object_name: object_name, property_name", "] property_name [ = ] value\"\"\" args = args.split() if", "created when '--bvthread <interval>' is specified as a command line", "a command line argument. 
Every <interval> seconds it changes the", "make a list of test values self.test_values = list(100.0 +", "= TestAnalogValueTask(args.avtask) test_av_task.install_task() # analog value thread if args.avthread: test_av_thread", "if args.avthread: test_av_thread = TestAnalogValueThread(args.avthread) deferred(test_av_thread.start) # binary value task", "# tell it to send out notifications cov_detection.send_cov_notifications() def do_set(self,", "range(10)) def process_task(self): if _debug: TestAnalogValueTask._debug(\"process_task\") global test_av # pop", "return # tell it to send out notifications cov_detection.send_cov_notifications() def", "0): print(\"no subscriptions for that object\") return # tell it", "= list(100.0 + float(i * 10) for i in range(10))", "in range(10)) def run(self): if _debug: TestAnalogValueThread._debug(\"run\") global test_av while", "= AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0],", "test globals test_av = None test_bv = None test_application =", "\"\"\"status\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_status %r\", args) global", "raise RuntimeError(\"not a property: %r\" % (property_name,)) # toss the", "the value value = eval(args.pop(0)) if _debug: COVConsoleCmd._debug(\" - raw", "class TestBinaryValueTask(RecurringTask): \"\"\" An instance of this class is created", "point test_bv.presentValue = next_value @bacpypes_debugging class TestBinaryValueThread(RecurringTask, Thread): \"\"\" An", "COVConsoleCmd._debug(\"do_trigger %r\", args) global test_application if not args: print(\"object name", "next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueThread._debug(\" - next_value: %r\",", "property_name = args.pop(0) if _debug: COVConsoleCmd._debug(\" - object_name: %r\", object_name)", "_debug: TestBinaryValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs as a daemon", 
"object_name.split('.') else: property_name = args.pop(0) if _debug: COVConsoleCmd._debug(\" - object_name:", "this class is created when '--avthread <interval>' is specified as", "next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueTask._debug(\" - next_value: %r\",", "raw value: %r\", value) # see if it can be", "present value. \"\"\" def __init__(self, interval): if _debug: TestBinaryValueTask._debug(\"__init__ %r\",", "args.console: test_console = COVConsoleCmd() _log.debug(\" - test_console: %r\", test_console) #", "if _debug: TestBinaryValueThread._debug(\"run\") global test_bv while True: # pop the", "if not obj: print(\"no such object\") return # get the", "default=False, help=\"create a console\", ) # analog value task and", "test_av, test_bv, test_application # make a parser parser = ConfigArgumentParser(description=__doc__)", "'--bvthread <interval>' is specified as a command line argument. Every", "test_av_task = TestAnalogValueTask(args.avtask) test_av_task.install_task() # analog value thread if args.avthread:", "cov_subscription in cov_detection.cov_subscriptions: print(\" {} proc_id={} confirmed={} lifetime={}\".format( cov_subscription.client_addr, cov_subscription.proc_id,", "equals if args[0] == '=': args.pop(0) # evaluate the value", "interval # make a list of test values self.test_values =", ") parser.add_argument(\"--avthread\", type=float, help=\"analog value thread\", ) # analog value", "this class is created when '--avtask <interval>' is specified as", "a console if args.console: test_console = COVConsoleCmd() _log.debug(\" - test_console:", "argument. 
Every <interval> seconds it changes the value of the", "interval * 1000) # save the interval self.interval = interval", "% (property_name,)) # toss the equals if args[0] == '=':", "class is created when '--bvtask <interval>' is specified as a", "self.test_values = list(float(i * 10) for i in range(10)) def", "TestAnalogValueThread(args.avthread) deferred(test_av_thread.start) # binary value task if args.bvtask: test_bv_task =", "= SubscribeCOVApplication(this_device, args.ini.address) # make an analog value object test_av", "class TestBinaryValueThread(RecurringTask, Thread): \"\"\" An instance of this class is", "'--avthread <interval>' is specified as a command line argument. Every", "args = args.split() if _debug: COVConsoleCmd._debug(\"do_set %r\", args) global test_application", "that object\") return # tell it to send out notifications", "\"\"\" def __init__(self, interval): if _debug: TestAnalogValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self)", "The console accepts commands that change the properties of an", "created when '--bvtask <interval>' is specified as a command line", "TestBinaryValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval * 1000) # save the", "args.ini.address) # make an analog value object test_av = AnalogValueObject(", "i in range(10)) def run(self): if _debug: TestAnalogValueThread._debug(\"run\") global test_av", "application is a server that supports COV notification services. The", "%r\", args) global test_application if not args: print(\"object name required\")", "if _debug: COVConsoleCmd._debug(\" - normalized value: %r\", value) # pass", "object list: %r\", this_device.objectList) # make a binary value object", "globals test_av = None test_bv = None test_application = None", "test_bv present value. 
\"\"\" def __init__(self, interval): if _debug: TestBinaryValueThread._debug(\"__init__", "COVConsoleCmd._debug(\"do_set %r\", args) global test_application try: object_name = args.pop(0) if", "(len(cov_detection.cov_subscriptions) == 0): print(\"no subscriptions for that object\") return #", "# parse the command line arguments args = parser.parse_args() if", "self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueThread._debug(\" - next_value: %r\", next_value) #", "that triggers the notifications. \"\"\" import time from threading import", "RecurringTask.__init__(self, interval * 1000) # save the interval self.interval =", "_debug: COVConsoleCmd._debug(\" - datatype: %r\", datatype) if not datatype: raise", "main(): global test_av, test_bv, test_application # make a parser parser", "test_bv_thread = TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start) _log.debug(\"running\") run(args.spin) _log.debug(\"fini\") if __name__ ==", "if _debug: COVConsoleCmd._debug(\"do_status %r\", args) global test_application # dump from", "value object test_bv = BinaryValueObject( objectIdentifier=('binaryValue', 1), objectName='bv', presentValue='inactive', statusFlags=[0,", "a list of test values self.test_values = list(100.0 + float(i", "bacpypes.consolelogging import ConfigArgumentParser from bacpypes.consolecmd import ConsoleCmd from bacpypes.core import", "# pop the next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if", "IndexError: print(COVConsoleCmd.do_set.__doc__) except Exception as err: print(\"exception: %s\" % (err,))", "import ConfigArgumentParser from bacpypes.consolecmd import ConsoleCmd from bacpypes.core import run,", "the properties of an object that triggers the notifications. 
\"\"\"", "[ = ] value\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_set", "parser.add_argument(\"--avtask\", type=float, help=\"analog value recurring task\", ) parser.add_argument(\"--avthread\", type=float, help=\"analog", "parser.add_argument(\"--bvthread\", type=float, help=\"binary value thread\", ) # provide a different", "object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0,", "%r\", interval) Thread.__init__(self) # runs as a daemon self.daemon =", "run, deferred, enable_sleeping from bacpypes.task import RecurringTask from bacpypes.app import", "# some debugging _debug = 0 _log = ModuleLogger(globals()) #", "= test_application.get_object_name(object_name) if _debug: COVConsoleCmd._debug(\" - obj: %r\", obj) if", "the equals if args[0] == '=': args.pop(0) # evaluate the", "if '.' in object_name: object_name, property_name = object_name.split('.') else: property_name", "eval(args.pop(0)) if _debug: COVConsoleCmd._debug(\" - raw value: %r\", value) #", "list: %r\", this_device.objectList) # make a binary value object test_bv", "interval self.interval = interval # make a list of test", "None) if (not cov_detection) or (len(cov_detection.cov_subscriptions) == 0): print(\"no subscriptions", "from bacpypes.core import run, deferred, enable_sleeping from bacpypes.task import RecurringTask", "[ . 
] property [ = ] value\"\"\" args =", "except IndexError: print(COVConsoleCmd.do_write.__doc__) except Exception as err: print(\"exception: %s\" %", "= args.pop(0) if _debug: COVConsoleCmd._debug(\" - object_name: %r\", object_name) if", "datatype(value) if _debug: COVConsoleCmd._debug(\" - obj_value: %r\", obj_value) # normalize", "time from threading import Thread from bacpypes.debugging import bacpypes_debugging, ModuleLogger", "sleep time.sleep(self.interval) @bacpypes_debugging class TestBinaryValueTask(RecurringTask): \"\"\" An instance of this", "COVConsoleCmd._debug(\" - obj: %r\", obj) if not obj: raise RuntimeError(\"object", "daemon self.daemon = True # save the interval self.interval =", "a list of test values self.test_values = [True, False] def", "@bacpypes_debugging class TestBinaryValueThread(RecurringTask, Thread): \"\"\" An instance of this class", "type=float, help=\"binary value thread\", ) # provide a different spin", "if _debug: _log.debug(\"initialization\") if _debug: _log.debug(\" - args: %r\", args)", "# see if it can be built obj_value = datatype(value)", "bacpypes.consolecmd import ConsoleCmd from bacpypes.core import run, deferred, enable_sleeping from", "# analog value task and thread parser.add_argument(\"--bvtask\", type=float, help=\"binary value", "parse the command line arguments args = parser.parse_args() if _debug:", "test_bv_task.install_task() # binary value thread if args.bvthread: test_bv_thread = TestBinaryValueThread(args.bvthread)", "notifications. \"\"\" import time from threading import Thread from bacpypes.debugging", "as a command line argument. 
Every <interval> seconds it changes", "test_bv: %r\", test_bv) # add it to the device test_application.add_object(test_bv)", "point test_av.presentValue = next_value # sleep time.sleep(self.interval) @bacpypes_debugging class TestBinaryValueTask(RecurringTask):", "raise RuntimeError(\"object not found: %r\" % (object_name,)) datatype = obj.get_datatype(property_name)", "task if args.bvtask: test_bv_task = TestBinaryValueTask(args.bvtask) test_bv_task.install_task() # binary value", "test_application = SubscribeCOVApplication(this_device, args.ini.address) # make an analog value object", "services. The console accepts commands that change the properties of", "save the interval self.interval = interval # make a list", "is created when '--bvthread <interval>' is specified as a command", "type=float, help=\"spin time\", default=1.0, ) # parse the command line", "python \"\"\" This sample application is a server that supports", "= self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueThread._debug(\" - next_value: %r\", next_value)", "it to the device test_application.add_object(test_bv) # make a console if", "- property_name: %r\", property_name) obj = test_application.get_object_name(object_name) if _debug: COVConsoleCmd._debug(\"", "thread if args.bvthread: test_bv_thread = TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start) _log.debug(\"running\") run(args.spin) _log.debug(\"fini\")", "of test values self.test_values = list(100.0 + float(i * 10)", "err: print(\"exception: %s\" % (err,)) @bacpypes_debugging class TestAnalogValueTask(RecurringTask): \"\"\" An", "- obj: %r\", obj) if not obj: raise RuntimeError(\"object not", "= args.split() if _debug: COVConsoleCmd._debug(\"do_trigger %r\", args) global test_application if", "object cov_detection = test_application.cov_detections.get(obj, None) if (not cov_detection) or (len(cov_detection.cov_subscriptions)", "list of test values self.test_values = list(float(i * 10) 
for", "10) for i in range(10)) def run(self): if _debug: TestAnalogValueThread._debug(\"run\")", "obj) if not obj: raise RuntimeError(\"object not found: %r\" %", "= args.split() if _debug: COVConsoleCmd._debug(\"do_set %r\", args) global test_application try:", "_debug: COVConsoleCmd._debug(\" - property_name: %r\", property_name) obj = test_application.get_object_name(object_name) if", "\"\"\" def __init__(self, interval): if _debug: TestAnalogValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self,", "= [True, False] def process_task(self): if _debug: TestBinaryValueTask._debug(\"process_task\") global test_bv", "test_av.presentValue = next_value @bacpypes_debugging class TestAnalogValueThread(Thread): \"\"\" An instance of", "# sleep time.sleep(self.interval) @bacpypes_debugging class TestBinaryValueTask(RecurringTask): \"\"\" An instance of", "present value. \"\"\" def __init__(self, interval): if _debug: TestBinaryValueThread._debug(\"__init__ %r\",", "object_name\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_trigger %r\", args) global", "help=\"binary value recurring task\", ) parser.add_argument(\"--bvthread\", type=float, help=\"binary value thread\",", "from bacpypes.app import BIPSimpleApplication from bacpypes.object import AnalogValueObject, BinaryValueObject from", "# analog value thread if args.avthread: test_av_thread = TestAnalogValueThread(args.avthread) deferred(test_av_thread.start)", "the point test_bv.presentValue = next_value @bacpypes_debugging class TestBinaryValueThread(RecurringTask, Thread): \"\"\"", "test_console: %r\", test_console) # enable sleeping will help with threads", "type=float, help=\"analog value recurring task\", ) parser.add_argument(\"--avthread\", type=float, help=\"analog value", "\"\"\" import time from threading import Thread from bacpypes.debugging import", "test_av = None test_bv = None test_application = None #", "time.sleep(self.interval) @bacpypes_debugging class 
TestBinaryValueTask(RecurringTask): \"\"\" An instance of this class", "value of the test_av present value. \"\"\" def __init__(self, interval):", "# runs as a daemon self.daemon = True # save", "# # SubscribeCOVApplication # @bacpypes_debugging class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices): pass #", "class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices): pass # # COVConsoleCmd # @bacpypes_debugging class", "= BinaryValueObject( objectIdentifier=('binaryValue', 1), objectName='bv', presentValue='inactive', statusFlags=[0, 0, 0, 0],", "if args.avtask: test_av_task = TestAnalogValueTask(args.avtask) test_av_task.install_task() # analog value thread", "TestBinaryValueThread._debug(\"run\") global test_bv while True: # pop the next value", "_debug: COVConsoleCmd._debug(\"do_trigger %r\", args) global test_application if not args: print(\"object", "from bacpypes.object import AnalogValueObject, BinaryValueObject from bacpypes.local.device import LocalDeviceObject from", "%r\", test_bv) # add it to the device test_application.add_object(test_bv) #", "toss the equals if args[0] == '=': args.pop(0) # evaluate", "%r\" % (property_name,)) # toss the equals if args[0] ==", "- raw value: %r\", value) # see if it can", "This sample application is a server that supports COV notification", "in test_application.cov_detections.items(): print(\"{} {}\".format(obj_ref.objectIdentifier, obj_ref)) for cov_subscription in cov_detection.cov_subscriptions: print(\"", "changes the value of the test_bv present value. 
\"\"\" def", "class is created when '--bvthread <interval>' is specified as a", "subscriptions for that object\") return # tell it to send", "args = args.split() if _debug: COVConsoleCmd._debug(\"do_trigger %r\", args) global test_application", "and thread parser.add_argument(\"--bvtask\", type=float, help=\"binary value recurring task\", ) parser.add_argument(\"--bvthread\",", "objectName='bv', presentValue='inactive', statusFlags=[0, 0, 0, 0], ) _log.debug(\" - test_bv:", "Every <interval> seconds it changes the value of the test_bv", "test_av: %r\", test_av) # add it to the device test_application.add_object(test_av)", "1000) # make a list of test values self.test_values =", "detections dict for obj_ref, cov_detection in test_application.cov_detections.items(): print(\"{} {}\".format(obj_ref.objectIdentifier, obj_ref))", "value. \"\"\" def __init__(self, interval): if _debug: TestBinaryValueTask._debug(\"__init__ %r\", interval)", "\"\"\" An instance of this class is created when '--avthread", "- next_value: %r\", next_value) # change the point test_av.presentValue =", "value. 
\"\"\" def __init__(self, interval): if _debug: TestAnalogValueThread._debug(\"__init__ %r\", interval)", "* 1000) # save the interval self.interval = interval #", "when '--avthread <interval>' is specified as a command line argument.", "next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueThread._debug(\" -", "bacpypes.core import run, deferred, enable_sleeping from bacpypes.task import RecurringTask from", "spin value parser.add_argument(\"--spin\", type=float, help=\"spin time\", default=1.0, ) # parse", "a device object this_device = LocalDeviceObject(ini=args.ini) if _debug: _log.debug(\" -", "obj: print(\"no such object\") return # get the detection algorithm", "# sleep time.sleep(self.interval) def main(): global test_av, test_bv, test_application #", "value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueThread._debug(\" - next_value:", "TestBinaryValueTask(args.bvtask) test_bv_task.install_task() # binary value thread if args.bvthread: test_bv_thread =", "#!/usr/bin/env python \"\"\" This sample application is a server that", "%r\", this_device) # make a sample application test_application = SubscribeCOVApplication(this_device,", "AnalogValueObject, BinaryValueObject from bacpypes.local.device import LocalDeviceObject from bacpypes.service.cov import ChangeOfValueServices", "the next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueTask._debug(\"", "* 10) for i in range(10)) def process_task(self): if _debug:", "%r\", args) global test_application # dump from the COV detections", "if _debug: COVConsoleCmd._debug(\" - raw value: %r\", value) # see", "if not obj: raise RuntimeError(\"object not found: %r\" % (object_name,))", "def __init__(self, interval): if _debug: TestAnalogValueTask._debug(\"__init__ %r\", interval) RecurringTask.__init__(self, interval", "seconds it changes the value of the 
test_av present value.", "True: # pop the next value next_value = self.test_values.pop(0) self.test_values.append(next_value)", "of this class is created when '--bvtask <interval>' is specified", "if _debug: COVConsoleCmd._debug(\" - datatype: %r\", datatype) if not datatype:", ") # analog value task and thread parser.add_argument(\"--avtask\", type=float, help=\"analog", "# save the interval self.interval = interval # make a", "* 1000) # make a list of test values self.test_values", "next_value # sleep time.sleep(self.interval) def main(): global test_av, test_bv, test_application", "value\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_set %r\", args) global", "will help with threads enable_sleeping() # analog value task if", "SubscribeCOVApplication(this_device, args.ini.address) # make an analog value object test_av =", "# dump from the COV detections dict for obj_ref, cov_detection", "instance of this class is created when '--avthread <interval>' is", "not found: %r\" % (object_name,)) datatype = obj.get_datatype(property_name) if _debug:", "value recurring task\", ) parser.add_argument(\"--bvthread\", type=float, help=\"binary value thread\", )", "a console\", ) # analog value task and thread parser.add_argument(\"--avtask\",", "enable sleeping will help with threads enable_sleeping() # analog value", "recurring task\", ) parser.add_argument(\"--avthread\", type=float, help=\"analog value thread\", ) #", "self.interval = interval # make a list of test values", "values self.test_values = list(100.0 + float(i * 10) for i", "@bacpypes_debugging class TestBinaryValueTask(RecurringTask): \"\"\" An instance of this class is", "value. 
\"\"\" def __init__(self, interval): if _debug: TestAnalogValueTask._debug(\"__init__ %r\", interval)", "COVConsoleCmd # @bacpypes_debugging class COVConsoleCmd(ConsoleCmd): def do_status(self, args): \"\"\"status\"\"\" args", "self.test_values.append(next_value) if _debug: TestBinaryValueTask._debug(\" - next_value: %r\", next_value) # change", "is created when '--avtask <interval>' is specified as a command", "% (err,)) def do_write(self, args): \"\"\"write object_name [ . ]", "- next_value: %r\", next_value) # change the point test_bv.presentValue =", "objectIdentifier=('binaryValue', 1), objectName='bv', presentValue='inactive', statusFlags=[0, 0, 0, 0], ) _log.debug(\"", "Exception as err: print(\"exception: %s\" % (err,)) @bacpypes_debugging class TestAnalogValueTask(RecurringTask):", "values self.test_values = list(float(i * 10) for i in range(10))", "objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=1.0, ) _log.debug(\" -", "change the properties of an object that triggers the notifications.", "ConfigArgumentParser from bacpypes.consolecmd import ConsoleCmd from bacpypes.core import run, deferred,", "- object_name: %r\", object_name) if _debug: COVConsoleCmd._debug(\" - property_name: %r\",", "as err: print(\"exception: %s\" % (err,)) def do_write(self, args): \"\"\"write", "args = parser.parse_args() if _debug: _log.debug(\"initialization\") if _debug: _log.debug(\" -", "task\", ) parser.add_argument(\"--bvthread\", type=float, help=\"binary value thread\", ) # provide", "do_set(self, args): \"\"\"set object_name [ . 
] property_name [ =", "%r\", obj_value) # normalize value = obj_value.value if _debug: COVConsoleCmd._debug(\"", "if _debug: COVConsoleCmd._debug(\" - normalized value: %r\", value) # change", "\"\"\" def __init__(self, interval): if _debug: TestBinaryValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self)", "pop the next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug:", "value thread\", ) # provide a different spin value parser.add_argument(\"--spin\",", "of test values self.test_values = [True, False] def process_task(self): if", "debugging _debug = 0 _log = ModuleLogger(globals()) # test globals", "send out notifications cov_detection.send_cov_notifications() def do_set(self, args): \"\"\"set object_name [", "make a device object this_device = LocalDeviceObject(ini=args.ini) if _debug: _log.debug(\"", "TestAnalogValueTask(RecurringTask): \"\"\" An instance of this class is created when", "this_device.objectList) # make a binary value object test_bv = BinaryValueObject(", "analog value task and thread parser.add_argument(\"--avtask\", type=float, help=\"analog value recurring", "make a list of test values self.test_values = list(float(i *", "list of test values self.test_values = [True, False] def process_task(self):", "seconds it changes the value of the test_bv present value.", "_log.debug(\" - object list: %r\", this_device.objectList) # make a binary", "def process_task(self): if _debug: TestAnalogValueTask._debug(\"process_task\") global test_av # pop the", "global test_application try: object_name = args.pop(0) if '.' 
in object_name:", "get the detection algorithm object cov_detection = test_application.cov_detections.get(obj, None) if", "deferred(test_av_thread.start) # binary value task if args.bvtask: test_bv_task = TestBinaryValueTask(args.bvtask)", "# toss the equals if args[0] == '=': args.pop(0) #", "test_application.cov_detections.get(obj, None) if (not cov_detection) or (len(cov_detection.cov_subscriptions) == 0): print(\"no", "0, 0, 0], ) _log.debug(\" - test_bv: %r\", test_bv) #", "for that object\") return # tell it to send out", "value thread if args.bvthread: test_bv_thread = TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start) _log.debug(\"running\") run(args.spin)", "= datatype(value) if _debug: COVConsoleCmd._debug(\" - obj_value: %r\", obj_value) #", "when '--bvthread <interval>' is specified as a command line argument.", "proc_id={} confirmed={} lifetime={}\".format( cov_subscription.client_addr, cov_subscription.proc_id, cov_subscription.confirmed, cov_subscription.lifetime, )) def do_trigger(self,", "import ConsoleCmd from bacpypes.core import run, deferred, enable_sleeping from bacpypes.task", "if _debug: TestAnalogValueThread._debug(\"run\") global test_av while True: # pop the", "class is created when '--avtask <interval>' is specified as a", "next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueTask._debug(\" -", "if not datatype: raise RuntimeError(\"not a property: %r\" % (property_name,))", "# normalize value = obj_value.value if _debug: COVConsoleCmd._debug(\" - normalized", "interval) RecurringTask.__init__(self, interval * 1000) # make a list of", "_debug: TestBinaryValueThread._debug(\"run\") global test_bv while True: # pop the next", "= None test_application = None # # SubscribeCOVApplication # @bacpypes_debugging", "BinaryValueObject from bacpypes.local.device import LocalDeviceObject from bacpypes.service.cov import ChangeOfValueServices #", "\"\"\"trigger 
object_name\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_trigger %r\", args)", "value value = eval(args.pop(0)) if _debug: COVConsoleCmd._debug(\" - raw value:", "enable_sleeping() # analog value task if args.avtask: test_av_task = TestAnalogValueTask(args.avtask)", "bacpypes.local.device import LocalDeviceObject from bacpypes.service.cov import ChangeOfValueServices # some debugging", "when '--avtask <interval>' is specified as a command line argument.", "parser.add_argument(\"--bvtask\", type=float, help=\"binary value recurring task\", ) parser.add_argument(\"--bvthread\", type=float, help=\"binary", "bacpypes.app import BIPSimpleApplication from bacpypes.object import AnalogValueObject, BinaryValueObject from bacpypes.local.device", "= 0 _log = ModuleLogger(globals()) # test globals test_av =", "for i in range(10)) def process_task(self): if _debug: TestAnalogValueTask._debug(\"process_task\") global", "in range(10)) def process_task(self): if _debug: TestAnalogValueTask._debug(\"process_task\") global test_av #", "a daemon self.daemon = True # save the interval self.interval", "\"\"\"write object_name [ . ] property [ = ] value\"\"\"", "command line arguments args = parser.parse_args() if _debug: _log.debug(\"initialization\") if", "AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=1.0,", "import ChangeOfValueServices # some debugging _debug = 0 _log =", "<interval>' is specified as a command line argument. 
Every <interval>", "process_task(self): if _debug: TestAnalogValueTask._debug(\"process_task\") global test_av # pop the next", "help=\"analog value thread\", ) # analog value task and thread", "test_av) # add it to the device test_application.add_object(test_av) _log.debug(\" -", "# make an analog value object test_av = AnalogValueObject( objectIdentifier=('analogValue',", "the interval self.interval = interval # make a list of", "10) for i in range(10)) def process_task(self): if _debug: TestAnalogValueTask._debug(\"process_task\")", "= self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueThread._debug(\" - next_value: %r\", next_value)", "statusFlags=[0, 0, 0, 0], ) _log.debug(\" - test_bv: %r\", test_bv)", "obj_ref)) for cov_subscription in cov_detection.cov_subscriptions: print(\" {} proc_id={} confirmed={} lifetime={}\".format(", "if (not cov_detection) or (len(cov_detection.cov_subscriptions) == 0): print(\"no subscriptions for", "'=': args.pop(0) # evaluate the value value = eval(args.pop(0)) if", "datatype: %r\", datatype) if not datatype: raise RuntimeError(\"not a property:", "False] def run(self): if _debug: TestBinaryValueThread._debug(\"run\") global test_bv while True:", "cov_detection.cov_subscriptions: print(\" {} proc_id={} confirmed={} lifetime={}\".format( cov_subscription.client_addr, cov_subscription.proc_id, cov_subscription.confirmed, cov_subscription.lifetime,", "= test_application.cov_detections.get(obj, None) if (not cov_detection) or (len(cov_detection.cov_subscriptions) == 0):", "args): \"\"\"write object_name [ . 
] property [ = ]", "from threading import Thread from bacpypes.debugging import bacpypes_debugging, ModuleLogger from", "change the point test_av.presentValue = next_value @bacpypes_debugging class TestAnalogValueThread(Thread): \"\"\"", "next_value # sleep time.sleep(self.interval) @bacpypes_debugging class TestBinaryValueTask(RecurringTask): \"\"\" An instance", "the point test_bv.presentValue = next_value # sleep time.sleep(self.interval) def main():", "# make a sample application test_application = SubscribeCOVApplication(this_device, args.ini.address) #", "ModuleLogger(globals()) # test globals test_av = None test_bv = None", "except Exception as err: print(\"exception: %s\" % (err,)) def do_write(self,", "(not cov_detection) or (len(cov_detection.cov_subscriptions) == 0): print(\"no subscriptions for that", "%r\", args) global test_application try: object_name = args.pop(0) if '.'", "the device test_application.add_object(test_av) _log.debug(\" - object list: %r\", this_device.objectList) #", "= interval # make a list of test values self.test_values", "\"\"\" An instance of this class is created when '--avtask", "point test_bv.presentValue = next_value # sleep time.sleep(self.interval) def main(): global", "self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueTask._debug(\" - next_value: %r\", next_value) #", "print(\"exception: %s\" % (err,)) @bacpypes_debugging class TestAnalogValueTask(RecurringTask): \"\"\" An instance", "value task if args.avtask: test_av_task = TestAnalogValueTask(args.avtask) test_av_task.install_task() # analog", "list(float(i * 10) for i in range(10)) def process_task(self): if", "# binary value task if args.bvtask: test_bv_task = TestBinaryValueTask(args.bvtask) test_bv_task.install_task()", "= TestBinaryValueTask(args.bvtask) test_bv_task.install_task() # binary value thread if args.bvthread: test_bv_thread", "return obj = test_application.get_object_name(args[0]) if not obj: print(\"no such 
object\")", "{} proc_id={} confirmed={} lifetime={}\".format( cov_subscription.client_addr, cov_subscription.proc_id, cov_subscription.confirmed, cov_subscription.lifetime, )) def", "global test_av while True: # pop the next value next_value", "of test values self.test_values = list(float(i * 10) for i", "analog value thread if args.avthread: test_av_thread = TestAnalogValueThread(args.avthread) deferred(test_av_thread.start) #", "TestAnalogValueThread(Thread): \"\"\" An instance of this class is created when", "Thread): \"\"\" An instance of this class is created when", "_debug: COVConsoleCmd._debug(\" - obj: %r\", obj) if not obj: raise", "= True # save the interval self.interval = interval #", "instance of this class is created when '--avtask <interval>' is", "add it to the device test_application.add_object(test_av) _log.debug(\" - object list:", "do_trigger(self, args): \"\"\"trigger object_name\"\"\" args = args.split() if _debug: COVConsoleCmd._debug(\"do_trigger", ". ] property [ = ] value\"\"\" args = args.split()", "- test_av: %r\", test_av) # add it to the device", "= TestAnalogValueThread(args.avthread) deferred(test_av_thread.start) # binary value task if args.bvtask: test_bv_task", "next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestBinaryValueTask._debug(\" - next_value: %r\",", "test values self.test_values = [True, False] def process_task(self): if _debug:", "object_name, property_name = object_name.split('.') else: property_name = args.pop(0) if _debug:", "run(self): if _debug: TestBinaryValueThread._debug(\"run\") global test_bv while True: # pop", "_log.debug(\" - test_bv: %r\", test_bv) # add it to the", "the detection algorithm object cov_detection = test_application.cov_detections.get(obj, None) if (not", "make a binary value object test_bv = BinaryValueObject( objectIdentifier=('binaryValue', 1),", "value task if args.bvtask: test_bv_task = TestBinaryValueTask(args.bvtask) test_bv_task.install_task() # 
binary", "next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueTask._debug(\" -", "# COVConsoleCmd # @bacpypes_debugging class COVConsoleCmd(ConsoleCmd): def do_status(self, args): \"\"\"status\"\"\"", "test_av while True: # pop the next value next_value =", "value: %r\", value) # pass it along obj.WriteProperty(property_name, value) except", "next value next_value = self.test_values.pop(0) self.test_values.append(next_value) if _debug: TestAnalogValueThread._debug(\" -", "TestAnalogValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs as a daemon self.daemon", "args.bvthread: test_bv_thread = TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start) _log.debug(\"running\") run(args.spin) _log.debug(\"fini\") if __name__", "next_value @bacpypes_debugging class TestAnalogValueThread(Thread): \"\"\" An instance of this class", "test_application.get_object_name(args[0]) if not obj: print(\"no such object\") return # get", "object test_bv = BinaryValueObject( objectIdentifier=('binaryValue', 1), objectName='bv', presentValue='inactive', statusFlags=[0, 0,", "object_name: %r\", object_name) if _debug: COVConsoleCmd._debug(\" - property_name: %r\", property_name)", "1000) # save the interval self.interval = interval # make", ") _log.debug(\" - test_av: %r\", test_av) # add it to", "if _debug: COVConsoleCmd._debug(\" - obj: %r\", obj) if not obj:", "in cov_detection.cov_subscriptions: print(\" {} proc_id={} confirmed={} lifetime={}\".format( cov_subscription.client_addr, cov_subscription.proc_id, cov_subscription.confirmed,", "thread\", ) # analog value task and thread parser.add_argument(\"--bvtask\", type=float,", "value recurring task\", ) parser.add_argument(\"--avthread\", type=float, help=\"analog value thread\", )", "args.split() if _debug: COVConsoleCmd._debug(\"do_set %r\", args) global test_application try: object_name", "def run(self): if _debug: 
TestBinaryValueThread._debug(\"run\") global test_bv while True: #", "is specified as a command line argument. Every <interval> seconds", "args) global test_application # dump from the COV detections dict", "setattr(obj, property_name, value) except IndexError: print(COVConsoleCmd.do_set.__doc__) except Exception as err:", "this class is created when '--bvtask <interval>' is specified as", "of the test_bv present value. \"\"\" def __init__(self, interval): if", "IndexError: print(COVConsoleCmd.do_write.__doc__) except Exception as err: print(\"exception: %s\" % (err,))", "built obj_value = datatype(value) if _debug: COVConsoleCmd._debug(\" - obj_value: %r\",", "TestBinaryValueThread._debug(\"__init__ %r\", interval) Thread.__init__(self) # runs as a daemon self.daemon", "_debug: _log.debug(\"initialization\") if _debug: _log.debug(\" - args: %r\", args) #", "a binary value object test_bv = BinaryValueObject( objectIdentifier=('binaryValue', 1), objectName='bv',", "it can be built obj_value = datatype(value) if _debug: COVConsoleCmd._debug(\"", "test_application if not args: print(\"object name required\") return obj =" ]
[ "from django.conf import settings def add(request, friend): phantomjs = os.path.join(settings.PROJECT_PATH,", "print \"sexy\" out = subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']]) print out", "script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js') try: subprocess.call([phantomjs, script, friend,", "def extract(request): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs') script", "django.conf import settings def add(request, friend): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface',", "\"sexy\" out = subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']]) print out return", "request.user.get_profile().facebook_pass]) except: return False return True def extract(request): phantomjs =", "script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'useridextractor.js') print \"sexy\" out =", "out = subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']]) print out return \"user", "'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js') try: subprocess.call([phantomjs, script,", "False return True def extract(request): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender',", "os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender',", "os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'useridextractor.js') print \"sexy\" out = subprocess.check_output([phantomjs, script,", "django.http import HttpResponse from django.conf import settings def add(request, friend):", "'glassface', 'facebookfriender', platform.system(), 'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface', 
'facebookfriender', 'useridextractor.js')", "'glassface', 'facebookfriender', platform.system(), 'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js')", "def add(request, friend): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs')", "= os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js') try: subprocess.call([phantomjs, script, friend, request.user.get_profile().facebook_email,", "friend, request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass]) except: return False return True def extract(request):", "extract(request): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs') script =", "import platform import subprocess from django.http import HttpResponse from django.conf", "os import platform import subprocess from django.http import HttpResponse from", "import subprocess from django.http import HttpResponse from django.conf import settings", "subprocess.call([phantomjs, script, friend, request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass]) except: return False return True", "script, friend, request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass]) except: return False return True def", "'facebookfriender', platform.system(), 'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js') try:", "platform.system(), 'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'useridextractor.js') print \"sexy\"", "except: return False return True def extract(request): phantomjs = os.path.join(settings.PROJECT_PATH,", "'facebookfriender', 'facebookfriender.js') try: subprocess.call([phantomjs, script, friend, request.user.get_profile().facebook_email, 
request.user.get_profile().facebook_pass]) except: return", "True def extract(request): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs')", "'glassface', 'facebookfriender', 'useridextractor.js') print \"sexy\" out = subprocess.check_output([phantomjs, script, request.POST['email'],", "try: subprocess.call([phantomjs, script, friend, request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass]) except: return False return", "from django.http import HttpResponse from django.conf import settings def add(request,", "os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js') try: subprocess.call([phantomjs, script, friend, request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass])", "import os import platform import subprocess from django.http import HttpResponse", "= os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'useridextractor.js') print \"sexy\" out = subprocess.check_output([phantomjs,", "request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass]) except: return False return True def extract(request): phantomjs", "'facebookfriender', platform.system(), 'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'useridextractor.js') print", "'facebookfriender.js') try: subprocess.call([phantomjs, script, friend, request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass]) except: return False", "= subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']]) print out return \"user id", "'facebookfriender', 'useridextractor.js') print \"sexy\" out = subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']])", "phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs') script = 
os.path.join(settings.PROJECT_PATH,", "platform import subprocess from django.http import HttpResponse from django.conf import", "= os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface',", "'glassface', 'facebookfriender', 'facebookfriender.js') try: subprocess.call([phantomjs, script, friend, request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass]) except:", "'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'useridextractor.js') print \"sexy\" out", "friend): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs') script =", "import settings def add(request, friend): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender',", "'useridextractor.js') print \"sexy\" out = subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']]) print", "HttpResponse from django.conf import settings def add(request, friend): phantomjs =", "platform.system(), 'phantomjs') script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js') try: subprocess.call([phantomjs,", "return True def extract(request): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(),", "subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']]) print out return \"user id goes", "subprocess from django.http import HttpResponse from django.conf import settings def", "return False return True def extract(request): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface',", "script, request.POST['email'], request.POST['password']]) print out return \"user id goes here\"", "add(request, friend): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs') 
script", "import HttpResponse from django.conf import settings def add(request, friend): phantomjs", "settings def add(request, friend): phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system()," ]
[ "\"\"\" # Specify the target classes target_string = st.selectbox(\"Select Target", "random import numpy as np import pandas as pd import", "Purpose: Renders a classification_report Args: df - Pandas dataframe Returns:", "\"\"\" # Prep data for model training ( test_features, train_features,", "st.selectbox(\"Select Class Column\", df.columns, index = len(df.columns) - 1) visualizer", "# Split Data randInt = random.randint(1, 200) ( test_features, train_features,", "Prep data for modeling Args: df - Pandas dataframe Returns:", "problem detection, the report integrates numerical scores with a color-coded", "yellowbrick.target import ClassBalance from streamlit_yellowbrick import st_yellowbrick from typing import", "def class_balance(df: pd.DataFrame) -> None: \"\"\" Purpose: Renders a class", "test_size=0.75, random_state=randInt) return ( test_features, train_features, test_target, train_target, ) def", "graph Args: df - Pandas dataframe Returns: N/A \"\"\" target_string", "import streamlit as st from sklearn.naive_bayes import GaussianNB from sklearn.model_selection", "as np import pandas as pd import streamlit as st", "fig = visualizer.fig ax = visualizer.show() fig.axes.append(ax) # show the", "List, Tuple import plotly.express as px def data_prep(df: pd.DataFrame) ->", "st.selectbox(\"Select the correlation method\", ['mutual_info-classification', 'pearson']) else: method = st.selectbox(\"Select", "if type_problem == 'classification': method = st.selectbox(\"Select the correlation method\",", "test set target train_target - train set target \"\"\" #", "= data_prep(df) if st.button(\"Train Model\"): st.header(\"Classification Report\") st.markdown( \"The classification", "Model\"): st.header(\"Classification Report\") st.markdown( \"The classification report visualizer displays the", "color-coded heatmap. 
All heatmaps are in the range (0.0, 1.0)", "modeling Args: df - Pandas dataframe Returns: test_features - test", "report visualizer displays the precision, recall, F1, and support scores", "Features\", df.columns) # Get all features features = df[feature_cols] featurestmp", "index = len(df.columns) - 1) visualizer = ClassBalance(labels = df[classes].unique())", "dataframe Returns: N/A \"\"\" target_string = st.selectbox(\"Select Target Column\", df.columns,", "import ClassBalance from streamlit_yellowbrick import st_yellowbrick from typing import Any,", "the target classes target_string = st.selectbox(\"Select Target Column\", df.columns) target", "support easier interpretation and problem detection, the report integrates numerical", "'mutual_info-classification' else: type_problem = st.selectbox(\"Select the type of problem\", ['classification',", "correlation graph Args: df - Pandas dataframe Returns: N/A \"\"\"", "data for modeling Args: df - Pandas dataframe Returns: test_features", "of problem that you select\") def class_balance(df: pd.DataFrame) -> None:", "method = st.selectbox(\"Select the correlation method\", ['mutual_info-regression', 'pearson']) try: viz", "df.columns) target = np.array(df[target_string]) # Select Features you want feature_cols", "the correlation method\", ['mutual_info-regression', 'pearson']) try: viz = FeatureCorrelation(method=method, feature_names=feature_cols,", "support=True, ) # Get the viz fig = visualizer.fig ax", "TODO live predictions def feature_correlation(df: pd.DataFrame) -> None: \"\"\" Purpose:", "# Specify the target classes target_string = st.selectbox(\"Select Target Column\",", "numerical scores with a color-coded heatmap. 
All heatmaps are in", "= st.selectbox(\"Select Class Column\", df.columns, index = len(df.columns) - 1)", "= np.array(feats) # Split Data randInt = random.randint(1, 200) (", "from sklearn.model_selection import train_test_split from yellowbrick.classifier import classification_report from yellowbrick.target", "\"The classification report visualizer displays the precision, recall, F1, and", "st.multiselect(\"Select Modeling Features\", residual_cols, key=\"multiselect-feature-correlation\", default=residual_cols[:5]) if str(df[target_string].dtype) == \"object\":", "target, test_size=0.75, random_state=randInt) return ( test_features, train_features, test_target, train_target, )", "for modeling Args: df - Pandas dataframe Returns: test_features -", "df.columns, index = len(df.columns) - 1) visualizer = ClassBalance(labels =", "Returns: N/A \"\"\" target_string = st.selectbox(\"Select Target Column\", df.columns, key=\"selectbox-feature-correlation\")", "predictions def feature_correlation(df: pd.DataFrame) -> None: \"\"\" Purpose: Renders a", "model. 
In order to support easier interpretation and problem detection,", "the type of problem\", ['classification', 'regression']) if type_problem == 'classification':", "Purpose: Renders a class balance graph Args: df - Pandas", "rows for index, featarr in enumerate(featurestmp): try: featarr = featarr.astype(float)", "- test set target train_target - train set target \"\"\"", "if col != target_string and df[col].dtype != \"object\"] feature_cols =", "def data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]: \"\"\" Purpose:", "px.bar(x=viz.scores_, y=viz.features_, title=\"Feature Correlation\") st.plotly_chart(fig) except : st.warning(\"Verify the type", "all features features = df[feature_cols] featurestmp = np.array(features) feats =", "the type of problem that you select\") def class_balance(df: pd.DataFrame)", ") # Instantiate the visualizer visualizer = classification_report( GaussianNB(), train_features,", "test_target, support=True, ) # Get the viz fig = visualizer.fig", "-> Tuple[List, List, List, List]: \"\"\" Purpose: Prep data for", "TODO download model, Download report # TODO live predictions def", "Purpose: Renders a feature correlation graph Args: df - Pandas", "random_state=randInt) return ( test_features, train_features, test_target, train_target, ) def show_classification_report(", "st.selectbox(\"Select Target Column\", df.columns, key=\"selectbox-feature-correlation\") residual_cols = [col for col", "pd.DataFrame, ) -> None: \"\"\" Purpose: Renders a classification_report Args:", "Pandas dataframe Returns: test_features - test set features train_features -", "features features = df[feature_cols] featurestmp = np.array(features) feats = []", "= [] # find all bad rows for index, featarr", "set features train_features - train set feautres test_target - test", "<gh_stars>0 import random import numpy as np import pandas as", "(0.0, 1.0) to facilitate easy comparison of classification models across", "feature_correlation(df: pd.DataFrame) -> None: 
\"\"\" Purpose: Renders a feature correlation", "feature_cols = st.multiselect(\"Select Modeling Features\", df.columns) # Get all features", ") # Get the viz fig = visualizer.fig ax =", "test set features train_features - train set feautres test_target -", "-> None: \"\"\" Purpose: Renders a feature correlation graph Args:", "st.stop() featuresarr = np.array(feats) # Split Data randInt = random.randint(1,", "train_target, ) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt) return ( test_features,", "classification_report Args: df - Pandas dataframe Returns: N/A \"\"\" #", "except Exception as error: st.error(error) st.error(featarr) st.stop() featuresarr = np.array(feats)", "pd.DataFrame) -> None: \"\"\" Purpose: Renders a class balance graph", "= np.array(features) feats = [] # find all bad rows", "problem\", ['classification', 'regression']) if type_problem == 'classification': method = st.selectbox(\"Select", "import random import numpy as np import pandas as pd", "features = df[feature_cols] featurestmp = np.array(features) feats = [] #", "'regression']) if type_problem == 'classification': method = st.selectbox(\"Select the correlation", "train_test_split from yellowbrick.classifier import classification_report from yellowbrick.target import FeatureCorrelation from", "import FeatureCorrelation from yellowbrick.target import ClassBalance from streamlit_yellowbrick import st_yellowbrick", "want feature_cols = st.multiselect(\"Select Modeling Features\", df.columns) # Get all", "import numpy as np import pandas as pd import streamlit", "Target Column\", df.columns, key=\"selectbox-feature-correlation\") residual_cols = [col for col in", "featarr = featarr.astype(float) feats.append(featarr) except Exception as error: st.error(error) st.error(featarr)", "target_string and df[col].dtype != \"object\"] feature_cols = st.multiselect(\"Select Modeling Features\",", "np.array(features) feats = [] # find all bad rows for", "facilitate easy 
comparison of classification models across different classification reports.\"", "List, List, List]: \"\"\" Purpose: Prep data for modeling Args:", "FeatureCorrelation(method=method, feature_names=feature_cols, sort=True) viz.fit(df[feature_cols], df[target_string]) fig = px.bar(x=viz.scores_, y=viz.features_, title=\"Feature", "that you select\") def class_balance(df: pd.DataFrame) -> None: \"\"\" Purpose:", "Returns: N/A \"\"\" # Prep data for model training (", "type_problem = st.selectbox(\"Select the type of problem\", ['classification', 'regression']) if", "= visualizer.show() fig.axes.append(ax) # show the viz st.write(fig) # TODO", "viz = FeatureCorrelation(method=method, feature_names=feature_cols, sort=True) viz.fit(df[feature_cols], df[target_string]) fig = px.bar(x=viz.scores_,", "pd.DataFrame) -> Tuple[List, List, List, List]: \"\"\" Purpose: Prep data", "method = 'mutual_info-classification' else: type_problem = st.selectbox(\"Select the type of", "Args: df - Pandas dataframe Returns: N/A \"\"\" # Prep", "detection, the report integrates numerical scores with a color-coded heatmap.", ") -> None: \"\"\" Purpose: Renders a classification_report Args: df", "precision, recall, F1, and support scores for the model. 
In", "target = np.array(df[target_string]) # Select Features you want feature_cols =", "visualizer visualizer = classification_report( GaussianNB(), train_features, train_target, test_features, test_target, support=True,", "st.warning(\"Verify the type of problem that you select\") def class_balance(df:", "= len(df.columns) - 1) visualizer = ClassBalance(labels = df[classes].unique()) visualizer.fit(df[classes])", "N/A \"\"\" target_string = st.selectbox(\"Select Target Column\", df.columns, key=\"selectbox-feature-correlation\") residual_cols", "if str(df[target_string].dtype) == \"object\": method = 'mutual_info-classification' else: type_problem =", "train_features, test_target, train_target, ) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt) return", "correlation method\", ['mutual_info-classification', 'pearson']) else: method = st.selectbox(\"Select the correlation", "visualizer.fig ax = visualizer.show() fig.axes.append(ax) # show the viz st.write(fig)", "# Prep data for model training ( test_features, train_features, test_target,", "yellowbrick.classifier import classification_report from yellowbrick.target import FeatureCorrelation from yellowbrick.target import", "title=\"Feature Correlation\") st.plotly_chart(fig) except : st.warning(\"Verify the type of problem", "and support scores for the model. 
In order to support", "!= target_string and df[col].dtype != \"object\"] feature_cols = st.multiselect(\"Select Modeling", "Column\", df.columns, key=\"selectbox-feature-correlation\") residual_cols = [col for col in df.columns", "and problem detection, the report integrates numerical scores with a", "viz fig = visualizer.fig ax = visualizer.show() fig.axes.append(ax) # show", "visualizer.show() fig.axes.append(ax) # show the viz st.write(fig) # TODO download", "key=\"selectbox-feature-correlation\") residual_cols = [col for col in df.columns if col", "np import pandas as pd import streamlit as st from", "you want feature_cols = st.multiselect(\"Select Modeling Features\", df.columns) # Get", "st.selectbox(\"Select the type of problem\", ['classification', 'regression']) if type_problem ==", "random.randint(1, 200) ( test_features, train_features, test_target, train_target, ) = train_test_split(featuresarr,", "len(df.columns) - 1) visualizer = ClassBalance(labels = df[classes].unique()) visualizer.fit(df[classes]) st_yellowbrick(visualizer)", "st.button(\"Train Model\"): st.header(\"Classification Report\") st.markdown( \"The classification report visualizer displays", "Prep data for model training ( test_features, train_features, test_target, train_target,", "train_features, test_target, train_target, ) = data_prep(df) if st.button(\"Train Model\"): st.header(\"Classification", "method = st.selectbox(\"Select the correlation method\", ['mutual_info-classification', 'pearson']) else: method", "['mutual_info-regression', 'pearson']) try: viz = FeatureCorrelation(method=method, feature_names=feature_cols, sort=True) viz.fit(df[feature_cols], df[target_string])", "- Pandas dataframe Returns: N/A \"\"\" classes = st.selectbox(\"Select Class", "class balance graph Args: df - Pandas dataframe Returns: N/A", "featarr in enumerate(featurestmp): try: featarr = featarr.astype(float) feats.append(featarr) except Exception", "\"\"\" Purpose: Renders a classification_report 
Args: df - Pandas dataframe", "comparison of classification models across different classification reports.\" ) #", "N/A \"\"\" classes = st.selectbox(\"Select Class Column\", df.columns, index =", "'classification': method = st.selectbox(\"Select the correlation method\", ['mutual_info-classification', 'pearson']) else:", "try: viz = FeatureCorrelation(method=method, feature_names=feature_cols, sort=True) viz.fit(df[feature_cols], df[target_string]) fig =", "train_target, test_features, test_target, support=True, ) # Get the viz fig", "In order to support easier interpretation and problem detection, the", "= st.selectbox(\"Select Target Column\", df.columns, key=\"selectbox-feature-correlation\") residual_cols = [col for", "Data randInt = random.randint(1, 200) ( test_features, train_features, test_target, train_target,", "Features\", residual_cols, key=\"multiselect-feature-correlation\", default=residual_cols[:5]) if str(df[target_string].dtype) == \"object\": method =", "Select Features you want feature_cols = st.multiselect(\"Select Modeling Features\", df.columns)", "st.multiselect(\"Select Modeling Features\", df.columns) # Get all features features =", "index, featarr in enumerate(featurestmp): try: featarr = featarr.astype(float) feats.append(featarr) except", "col in df.columns if col != target_string and df[col].dtype !=", "None: \"\"\" Purpose: Renders a class balance graph Args: df", "= random.randint(1, 200) ( test_features, train_features, test_target, train_target, ) =", "Renders a class balance graph Args: df - Pandas dataframe", "- test set features train_features - train set feautres test_target", "== \"object\": method = 'mutual_info-classification' else: type_problem = st.selectbox(\"Select the", "feature_cols = st.multiselect(\"Select Modeling Features\", residual_cols, key=\"multiselect-feature-correlation\", default=residual_cols[:5]) if str(df[target_string].dtype)", "classification reports.\" ) # Instantiate the visualizer visualizer = 
classification_report(", "target_string = st.selectbox(\"Select Target Column\", df.columns, key=\"selectbox-feature-correlation\") residual_cols = [col", "( test_features, train_features, test_target, train_target, ) = train_test_split(featuresarr, target, test_size=0.75,", "heatmap. All heatmaps are in the range (0.0, 1.0) to", "class_balance(df: pd.DataFrame) -> None: \"\"\" Purpose: Renders a class balance", "in the range (0.0, 1.0) to facilitate easy comparison of", "of problem\", ['classification', 'regression']) if type_problem == 'classification': method =", "\"object\": method = 'mutual_info-classification' else: type_problem = st.selectbox(\"Select the type", "Purpose: Prep data for modeling Args: df - Pandas dataframe", ") = data_prep(df) if st.button(\"Train Model\"): st.header(\"Classification Report\") st.markdown( \"The", "import plotly.express as px def data_prep(df: pd.DataFrame) -> Tuple[List, List,", "- Pandas dataframe Returns: test_features - test set features train_features", "features train_features - train set feautres test_target - test set", "Pandas dataframe Returns: N/A \"\"\" classes = st.selectbox(\"Select Class Column\",", "the report integrates numerical scores with a color-coded heatmap. 
All", "streamlit_yellowbrick import st_yellowbrick from typing import Any, List, Tuple import", "sklearn.naive_bayes import GaussianNB from sklearn.model_selection import train_test_split from yellowbrick.classifier import", "import st_yellowbrick from typing import Any, List, Tuple import plotly.express", "show the viz st.write(fig) # TODO download model, Download report", "Tuple import plotly.express as px def data_prep(df: pd.DataFrame) -> Tuple[List,", "Returns: test_features - test set features train_features - train set", "= df[feature_cols] featurestmp = np.array(features) feats = [] # find", "type_problem == 'classification': method = st.selectbox(\"Select the correlation method\", ['mutual_info-classification',", "1.0) to facilitate easy comparison of classification models across different", "= st.selectbox(\"Select Target Column\", df.columns) target = np.array(df[target_string]) # Select", "for the model. In order to support easier interpretation and", "residual_cols = [col for col in df.columns if col !=", "as px def data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]:", "in enumerate(featurestmp): try: featarr = featarr.astype(float) feats.append(featarr) except Exception as", "= st.selectbox(\"Select the type of problem\", ['classification', 'regression']) if type_problem", "fig = px.bar(x=viz.scores_, y=viz.features_, title=\"Feature Correlation\") st.plotly_chart(fig) except : st.warning(\"Verify", "Renders a classification_report Args: df - Pandas dataframe Returns: N/A", "== 'classification': method = st.selectbox(\"Select the correlation method\", ['mutual_info-classification', 'pearson'])", "for index, featarr in enumerate(featurestmp): try: featarr = featarr.astype(float) feats.append(featarr)", "test_features, train_features, test_target, train_target, ) def show_classification_report( df: pd.DataFrame, )", "Split Data randInt = random.randint(1, 200) ( test_features, train_features, test_target,", "to support easier interpretation 
and problem detection, the report integrates", "( test_features, train_features, test_target, train_target, ) = data_prep(df) if st.button(\"Train", "= [col for col in df.columns if col != target_string", "feature correlation graph Args: df - Pandas dataframe Returns: N/A", "featarr.astype(float) feats.append(featarr) except Exception as error: st.error(error) st.error(featarr) st.stop() featuresarr", "the precision, recall, F1, and support scores for the model.", "\"\"\" Purpose: Renders a class balance graph Args: df -", "st.error(error) st.error(featarr) st.stop() featuresarr = np.array(feats) # Split Data randInt", "select\") def class_balance(df: pd.DataFrame) -> None: \"\"\" Purpose: Renders a", "# Instantiate the visualizer visualizer = classification_report( GaussianNB(), train_features, train_target,", "df.columns, key=\"selectbox-feature-correlation\") residual_cols = [col for col in df.columns if", "y=viz.features_, title=\"Feature Correlation\") st.plotly_chart(fig) except : st.warning(\"Verify the type of", "Exception as error: st.error(error) st.error(featarr) st.stop() featuresarr = np.array(feats) #", "featuresarr = np.array(feats) # Split Data randInt = random.randint(1, 200)", "try: featarr = featarr.astype(float) feats.append(featarr) except Exception as error: st.error(error)", "for col in df.columns if col != target_string and df[col].dtype", "= 'mutual_info-classification' else: type_problem = st.selectbox(\"Select the type of problem\",", "classification_report from yellowbrick.target import FeatureCorrelation from yellowbrick.target import ClassBalance from", "Instantiate the visualizer visualizer = classification_report( GaussianNB(), train_features, train_target, test_features,", "Args: df - Pandas dataframe Returns: N/A \"\"\" target_string =", "all bad rows for index, featarr in enumerate(featurestmp): try: featarr", "import GaussianNB from sklearn.model_selection import train_test_split from yellowbrick.classifier import 
classification_report", "different classification reports.\" ) # Instantiate the visualizer visualizer =", "as pd import streamlit as st from sklearn.naive_bayes import GaussianNB", "fig.axes.append(ax) # show the viz st.write(fig) # TODO download model,", "st.selectbox(\"Select Target Column\", df.columns) target = np.array(df[target_string]) # Select Features", "data for model training ( test_features, train_features, test_target, train_target, )", "return ( test_features, train_features, test_target, train_target, ) def show_classification_report( df:", "Column\", df.columns) target = np.array(df[target_string]) # Select Features you want", "test_target - test set target train_target - train set target", "test_target, train_target, ) = data_prep(df) if st.button(\"Train Model\"): st.header(\"Classification Report\")", "data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]: \"\"\" Purpose: Prep", "# TODO live predictions def feature_correlation(df: pd.DataFrame) -> None: \"\"\"", "df.columns if col != target_string and df[col].dtype != \"object\"] feature_cols", "Tuple[List, List, List, List]: \"\"\" Purpose: Prep data for modeling", "Get all features features = df[feature_cols] featurestmp = np.array(features) feats", "!= \"object\"] feature_cols = st.multiselect(\"Select Modeling Features\", residual_cols, key=\"multiselect-feature-correlation\", default=residual_cols[:5])", "# Get the viz fig = visualizer.fig ax = visualizer.show()", "All heatmaps are in the range (0.0, 1.0) to facilitate", "default=residual_cols[:5]) if str(df[target_string].dtype) == \"object\": method = 'mutual_info-classification' else: type_problem", "['classification', 'regression']) if type_problem == 'classification': method = st.selectbox(\"Select the", "in df.columns if col != target_string and df[col].dtype != \"object\"]", "\"object\"] feature_cols = st.multiselect(\"Select Modeling Features\", residual_cols, key=\"multiselect-feature-correlation\", 
default=residual_cols[:5]) if", "\"\"\" classes = st.selectbox(\"Select Class Column\", df.columns, index = len(df.columns)", "'pearson']) else: method = st.selectbox(\"Select the correlation method\", ['mutual_info-regression', 'pearson'])", "from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import train_test_split from yellowbrick.classifier", "Specify the target classes target_string = st.selectbox(\"Select Target Column\", df.columns)", "st.write(fig) # TODO download model, Download report # TODO live", "integrates numerical scores with a color-coded heatmap. All heatmaps are", "px def data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]: \"\"\"", "def show_classification_report( df: pd.DataFrame, ) -> None: \"\"\" Purpose: Renders", "range (0.0, 1.0) to facilitate easy comparison of classification models", "to facilitate easy comparison of classification models across different classification", "dataframe Returns: N/A \"\"\" classes = st.selectbox(\"Select Class Column\", df.columns,", "def feature_correlation(df: pd.DataFrame) -> None: \"\"\" Purpose: Renders a feature", "pandas as pd import streamlit as st from sklearn.naive_bayes import", "else: type_problem = st.selectbox(\"Select the type of problem\", ['classification', 'regression'])", "test_features, train_features, test_target, train_target, ) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt)", "None: \"\"\" Purpose: Renders a classification_report Args: df - Pandas", "a color-coded heatmap. 
All heatmaps are in the range (0.0,", "\"\"\" target_string = st.selectbox(\"Select Target Column\", df.columns, key=\"selectbox-feature-correlation\") residual_cols =", "GaussianNB(), train_features, train_target, test_features, test_target, support=True, ) # Get the", "graph Args: df - Pandas dataframe Returns: N/A \"\"\" classes", "FeatureCorrelation from yellowbrick.target import ClassBalance from streamlit_yellowbrick import st_yellowbrick from", "the model. In order to support easier interpretation and problem", "train_target, ) def show_classification_report( df: pd.DataFrame, ) -> None: \"\"\"", "else: method = st.selectbox(\"Select the correlation method\", ['mutual_info-regression', 'pearson']) try:", "plotly.express as px def data_prep(df: pd.DataFrame) -> Tuple[List, List, List,", "data_prep(df) if st.button(\"Train Model\"): st.header(\"Classification Report\") st.markdown( \"The classification report", "-> None: \"\"\" Purpose: Renders a classification_report Args: df -", "model, Download report # TODO live predictions def feature_correlation(df: pd.DataFrame)", "-> None: \"\"\" Purpose: Renders a class balance graph Args:", "classification report visualizer displays the precision, recall, F1, and support", "heatmaps are in the range (0.0, 1.0) to facilitate easy", "the range (0.0, 1.0) to facilitate easy comparison of classification", "key=\"multiselect-feature-correlation\", default=residual_cols[:5]) if str(df[target_string].dtype) == \"object\": method = 'mutual_info-classification' else:", ") = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt) return ( test_features, train_features,", "df: pd.DataFrame, ) -> None: \"\"\" Purpose: Renders a classification_report", "'pearson']) try: viz = FeatureCorrelation(method=method, feature_names=feature_cols, sort=True) viz.fit(df[feature_cols], df[target_string]) fig", "set target train_target - train set target \"\"\" # Specify", "support scores for the model. 
In order to support easier", "across different classification reports.\" ) # Instantiate the visualizer visualizer", "target \"\"\" # Specify the target classes target_string = st.selectbox(\"Select", ") def show_classification_report( df: pd.DataFrame, ) -> None: \"\"\" Purpose:", "numpy as np import pandas as pd import streamlit as", "( test_features, train_features, test_target, train_target, ) def show_classification_report( df: pd.DataFrame,", "Download report # TODO live predictions def feature_correlation(df: pd.DataFrame) ->", "train_features, train_target, test_features, test_target, support=True, ) # Get the viz", "= px.bar(x=viz.scores_, y=viz.features_, title=\"Feature Correlation\") st.plotly_chart(fig) except : st.warning(\"Verify the", "List]: \"\"\" Purpose: Prep data for modeling Args: df -", "= visualizer.fig ax = visualizer.show() fig.axes.append(ax) # show the viz", "interpretation and problem detection, the report integrates numerical scores with", "st from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import train_test_split from", "as error: st.error(error) st.error(featarr) st.stop() featuresarr = np.array(feats) # Split", "models across different classification reports.\" ) # Instantiate the visualizer", "import pandas as pd import streamlit as st from sklearn.naive_bayes", "Features you want feature_cols = st.multiselect(\"Select Modeling Features\", df.columns) #", "recall, F1, and support scores for the model. 
In order", "visualizer = classification_report( GaussianNB(), train_features, train_target, test_features, test_target, support=True, )", "Pandas dataframe Returns: N/A \"\"\" target_string = st.selectbox(\"Select Target Column\",", "from yellowbrick.classifier import classification_report from yellowbrick.target import FeatureCorrelation from yellowbrick.target", "Renders a feature correlation graph Args: df - Pandas dataframe", "- Pandas dataframe Returns: N/A \"\"\" # Prep data for", "from typing import Any, List, Tuple import plotly.express as px", "sklearn.model_selection import train_test_split from yellowbrick.classifier import classification_report from yellowbrick.target import", "Target Column\", df.columns) target = np.array(df[target_string]) # Select Features you", "train_features - train set feautres test_target - test set target", "F1, and support scores for the model. In order to", "test_features, test_target, support=True, ) # Get the viz fig =", "pd.DataFrame) -> None: \"\"\" Purpose: Renders a feature correlation graph", "[] # find all bad rows for index, featarr in", "train_target, ) = data_prep(df) if st.button(\"Train Model\"): st.header(\"Classification Report\") st.markdown(", "Args: df - Pandas dataframe Returns: N/A \"\"\" classes =", "classes target_string = st.selectbox(\"Select Target Column\", df.columns) target = np.array(df[target_string])", "the viz st.write(fig) # TODO download model, Download report #", "df - Pandas dataframe Returns: N/A \"\"\" # Prep data", "col != target_string and df[col].dtype != \"object\"] feature_cols = st.multiselect(\"Select", "feats.append(featarr) except Exception as error: st.error(error) st.error(featarr) st.stop() featuresarr =", "method\", ['mutual_info-classification', 'pearson']) else: method = st.selectbox(\"Select the correlation method\",", "from streamlit_yellowbrick import st_yellowbrick from typing import Any, List, Tuple", "viz.fit(df[feature_cols], df[target_string]) fig = 
px.bar(x=viz.scores_, y=viz.features_, title=\"Feature Correlation\") st.plotly_chart(fig) except", "train_target - train set target \"\"\" # Specify the target", "type of problem\", ['classification', 'regression']) if type_problem == 'classification': method", "= classification_report( GaussianNB(), train_features, train_target, test_features, test_target, support=True, ) #", "viz st.write(fig) # TODO download model, Download report # TODO", "import classification_report from yellowbrick.target import FeatureCorrelation from yellowbrick.target import ClassBalance", "bad rows for index, featarr in enumerate(featurestmp): try: featarr =", "train_test_split(featuresarr, target, test_size=0.75, random_state=randInt) return ( test_features, train_features, test_target, train_target,", "the correlation method\", ['mutual_info-classification', 'pearson']) else: method = st.selectbox(\"Select the", "= np.array(df[target_string]) # Select Features you want feature_cols = st.multiselect(\"Select", "correlation method\", ['mutual_info-regression', 'pearson']) try: viz = FeatureCorrelation(method=method, feature_names=feature_cols, sort=True)", "error: st.error(error) st.error(featarr) st.stop() featuresarr = np.array(feats) # Split Data", "Correlation\") st.plotly_chart(fig) except : st.warning(\"Verify the type of problem that", "from yellowbrick.target import ClassBalance from streamlit_yellowbrick import st_yellowbrick from typing", "the visualizer visualizer = classification_report( GaussianNB(), train_features, train_target, test_features, test_target,", "a classification_report Args: df - Pandas dataframe Returns: N/A \"\"\"", "report # TODO live predictions def feature_correlation(df: pd.DataFrame) -> None:", "= featarr.astype(float) feats.append(featarr) except Exception as error: st.error(error) st.error(featarr) st.stop()", "featurestmp = np.array(features) feats = [] # find all bad", "- train set target \"\"\" # Specify the target classes", "a feature correlation 
graph Args: df - Pandas dataframe Returns:", "Pandas dataframe Returns: N/A \"\"\" # Prep data for model", "easy comparison of classification models across different classification reports.\" )", "import Any, List, Tuple import plotly.express as px def data_prep(df:", "df[feature_cols] featurestmp = np.array(features) feats = [] # find all", "set target \"\"\" # Specify the target classes target_string =", "as st from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import train_test_split", "df[col].dtype != \"object\"] feature_cols = st.multiselect(\"Select Modeling Features\", residual_cols, key=\"multiselect-feature-correlation\",", "- train set feautres test_target - test set target train_target", "problem that you select\") def class_balance(df: pd.DataFrame) -> None: \"\"\"", "train set feautres test_target - test set target train_target -", "feautres test_target - test set target train_target - train set", "st.error(featarr) st.stop() featuresarr = np.array(feats) # Split Data randInt =", "scores with a color-coded heatmap. 
All heatmaps are in the", "dataframe Returns: N/A \"\"\" # Prep data for model training", "= st.selectbox(\"Select the correlation method\", ['mutual_info-regression', 'pearson']) try: viz =", "are in the range (0.0, 1.0) to facilitate easy comparison", "of classification models across different classification reports.\" ) # Instantiate", "randInt = random.randint(1, 200) ( test_features, train_features, test_target, train_target, )", "classification_report( GaussianNB(), train_features, train_target, test_features, test_target, support=True, ) # Get", "target classes target_string = st.selectbox(\"Select Target Column\", df.columns) target =", "type of problem that you select\") def class_balance(df: pd.DataFrame) ->", "dataframe Returns: test_features - test set features train_features - train", "show_classification_report( df: pd.DataFrame, ) -> None: \"\"\" Purpose: Renders a", "200) ( test_features, train_features, test_target, train_target, ) = train_test_split(featuresarr, target,", "the viz fig = visualizer.fig ax = visualizer.show() fig.axes.append(ax) #", "= FeatureCorrelation(method=method, feature_names=feature_cols, sort=True) viz.fit(df[feature_cols], df[target_string]) fig = px.bar(x=viz.scores_, y=viz.features_,", "classification models across different classification reports.\" ) # Instantiate the", "st.markdown( \"The classification report visualizer displays the precision, recall, F1,", "Modeling Features\", residual_cols, key=\"multiselect-feature-correlation\", default=residual_cols[:5]) if str(df[target_string].dtype) == \"object\": method", "residual_cols, key=\"multiselect-feature-correlation\", default=residual_cols[:5]) if str(df[target_string].dtype) == \"object\": method = 'mutual_info-classification'", "reports.\" ) # Instantiate the visualizer visualizer = classification_report( GaussianNB(),", "displays the precision, recall, F1, and support scores for the", "Get the viz fig = visualizer.fig ax = visualizer.show() 
fig.axes.append(ax)", "Column\", df.columns, index = len(df.columns) - 1) visualizer = ClassBalance(labels", "= st.multiselect(\"Select Modeling Features\", df.columns) # Get all features features", "test_target, train_target, ) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt) return (", "streamlit as st from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import", "download model, Download report # TODO live predictions def feature_correlation(df:", "method\", ['mutual_info-regression', 'pearson']) try: viz = FeatureCorrelation(method=method, feature_names=feature_cols, sort=True) viz.fit(df[feature_cols],", "\"\"\" Purpose: Renders a feature correlation graph Args: df -", "# TODO download model, Download report # TODO live predictions", "Class Column\", df.columns, index = len(df.columns) - 1) visualizer =", "except : st.warning(\"Verify the type of problem that you select\")", "st.header(\"Classification Report\") st.markdown( \"The classification report visualizer displays the precision,", "# show the viz st.write(fig) # TODO download model, Download", "df - Pandas dataframe Returns: test_features - test set features", "report integrates numerical scores with a color-coded heatmap. 
All heatmaps", "target train_target - train set target \"\"\" # Specify the", "if st.button(\"Train Model\"): st.header(\"Classification Report\") st.markdown( \"The classification report visualizer", "feature_names=feature_cols, sort=True) viz.fit(df[feature_cols], df[target_string]) fig = px.bar(x=viz.scores_, y=viz.features_, title=\"Feature Correlation\")", "df - Pandas dataframe Returns: N/A \"\"\" classes = st.selectbox(\"Select", "training ( test_features, train_features, test_target, train_target, ) = data_prep(df) if", "test_features, train_features, test_target, train_target, ) = data_prep(df) if st.button(\"Train Model\"):", "balance graph Args: df - Pandas dataframe Returns: N/A \"\"\"", "ClassBalance from streamlit_yellowbrick import st_yellowbrick from typing import Any, List,", "= st.multiselect(\"Select Modeling Features\", residual_cols, key=\"multiselect-feature-correlation\", default=residual_cols[:5]) if str(df[target_string].dtype) ==", "np.array(df[target_string]) # Select Features you want feature_cols = st.multiselect(\"Select Modeling", "df.columns) # Get all features features = df[feature_cols] featurestmp =", "find all bad rows for index, featarr in enumerate(featurestmp): try:", "df - Pandas dataframe Returns: N/A \"\"\" target_string = st.selectbox(\"Select", "df[target_string]) fig = px.bar(x=viz.scores_, y=viz.features_, title=\"Feature Correlation\") st.plotly_chart(fig) except :", "model training ( test_features, train_features, test_target, train_target, ) = data_prep(df)", "= train_test_split(featuresarr, target, test_size=0.75, random_state=randInt) return ( test_features, train_features, test_target,", "st_yellowbrick from typing import Any, List, Tuple import plotly.express as", "easier interpretation and problem detection, the report integrates numerical scores", "# find all bad rows for index, featarr in enumerate(featurestmp):", "GaussianNB from sklearn.model_selection import train_test_split from yellowbrick.classifier 
import classification_report from", "Args: df - Pandas dataframe Returns: test_features - test set", "st.selectbox(\"Select the correlation method\", ['mutual_info-regression', 'pearson']) try: viz = FeatureCorrelation(method=method,", "test_features - test set features train_features - train set feautres", "train_features, test_target, train_target, ) def show_classification_report( df: pd.DataFrame, ) ->", "from yellowbrick.target import FeatureCorrelation from yellowbrick.target import ClassBalance from streamlit_yellowbrick", "you select\") def class_balance(df: pd.DataFrame) -> None: \"\"\" Purpose: Renders", "st.plotly_chart(fig) except : st.warning(\"Verify the type of problem that you", "target_string = st.selectbox(\"Select Target Column\", df.columns) target = np.array(df[target_string]) #", "= st.selectbox(\"Select the correlation method\", ['mutual_info-classification', 'pearson']) else: method =", "test_target, train_target, ) def show_classification_report( df: pd.DataFrame, ) -> None:", "ax = visualizer.show() fig.axes.append(ax) # show the viz st.write(fig) #", "str(df[target_string].dtype) == \"object\": method = 'mutual_info-classification' else: type_problem = st.selectbox(\"Select", "enumerate(featurestmp): try: featarr = featarr.astype(float) feats.append(featarr) except Exception as error:", "pd import streamlit as st from sklearn.naive_bayes import GaussianNB from", "\"\"\" Purpose: Prep data for modeling Args: df - Pandas", "typing import Any, List, Tuple import plotly.express as px def", "live predictions def feature_correlation(df: pd.DataFrame) -> None: \"\"\" Purpose: Renders", "# Select Features you want feature_cols = st.multiselect(\"Select Modeling Features\",", "None: \"\"\" Purpose: Renders a feature correlation graph Args: df", "- Pandas dataframe Returns: N/A \"\"\" target_string = st.selectbox(\"Select Target", "for model training ( test_features, train_features, test_target, train_target, ) =", "order to support easier 
interpretation and problem detection, the report", "np.array(feats) # Split Data randInt = random.randint(1, 200) ( test_features,", "with a color-coded heatmap. All heatmaps are in the range", "scores for the model. In order to support easier interpretation", "Modeling Features\", df.columns) # Get all features features = df[feature_cols]", "visualizer displays the precision, recall, F1, and support scores for", "List, List]: \"\"\" Purpose: Prep data for modeling Args: df", "Returns: N/A \"\"\" classes = st.selectbox(\"Select Class Column\", df.columns, index", "set feautres test_target - test set target train_target - train", "and df[col].dtype != \"object\"] feature_cols = st.multiselect(\"Select Modeling Features\", residual_cols,", "feats = [] # find all bad rows for index,", "# Get all features features = df[feature_cols] featurestmp = np.array(features)", "Any, List, Tuple import plotly.express as px def data_prep(df: pd.DataFrame)", "a class balance graph Args: df - Pandas dataframe Returns:", "['mutual_info-classification', 'pearson']) else: method = st.selectbox(\"Select the correlation method\", ['mutual_info-regression',", "[col for col in df.columns if col != target_string and", "train set target \"\"\" # Specify the target classes target_string", "classes = st.selectbox(\"Select Class Column\", df.columns, index = len(df.columns) -", "import train_test_split from yellowbrick.classifier import classification_report from yellowbrick.target import FeatureCorrelation", "yellowbrick.target import FeatureCorrelation from yellowbrick.target import ClassBalance from streamlit_yellowbrick import", "sort=True) viz.fit(df[feature_cols], df[target_string]) fig = px.bar(x=viz.scores_, y=viz.features_, title=\"Feature Correlation\") st.plotly_chart(fig)", "N/A \"\"\" # Prep data for model training ( test_features,", "Report\") st.markdown( \"The classification report visualizer displays the precision, recall,", ": st.warning(\"Verify the type of problem that you 
select\") def" ]
[ "from flask import session from flask import url_for import time", "except Exception as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") #", "current_app.logger.error(e) day_count = 0 try: day_begin = \"%d-%02d-%02d\" % (now.tm_year,", "- timedelta(days=(i - 1)) active_date.append(begin_date.strftime(\"%Y-%m-%d\")) count = 0 try: count", "except Exception as e: return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") # 2. 将标题图片上传到七牛", "= session.get(\"user_id\", None) is_admin = session.get(\"is_admin\", False) if user_id and", "return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") if action not in (\"accept\", \"reject\"): return", "\"POST\"]) def news_edit(): \"\"\"返回新闻列表\"\"\" page = request.args.get(\"p\", \"1\") print(page) a", "current_app.logger.error(e) if not news: return jsonify(errno=RET.NODATA, errmsg=\"未找到新闻数据\") # 1.2 尝试读取图片", "= constants.QINIU_DOMIN_PREFIX + key # 3. 设置相关数据 news.title = title", "- 1)) active_date.append(begin_date.strftime(\"%Y-%m-%d\")) count = 0 try: count = User.query.filter(User.is_admin", "methods=[\"GET\", \"POST\"]) def news_edit(): \"\"\"返回新闻列表\"\"\" page = request.args.get(\"p\", \"1\") print(page)", "render_template(\"admin/news_edit.html\", data=data) # return jsonify(errno=RET.OK, errmsg=\"OK\") return render_template(\"admin/news_edit.html\", data=data) @admin_blu.route(\"/news_edit_detail\",", "return redirect(url_for(\"admin.admin_index\")) @admin_blu.route(\"/index\") @user_login_data def admin_index(): user = g.user return", "User.query.filter(User.is_admin == False)\\ .order_by(User.last_login.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) users = paginate.items", "return render_template(\"admin/news_review.html\", data=data) @admin_blu.route(\"/news_review_detail\", methods=[\"GET\", \"POST\"]) def news_review_detail(): \"\"\"新闻审核\"\"\" #", "action = request.json.get(\"action\") #2. 
判断参数 if not all([news_id, action]): return", "jsonify from flask import g from flask import make_response from", "data = {\"total_count\": total_count, \"mon_count\": mon_count, \"day_count\": day_count, \"active_date\": active_date,", "1 try: filters = [News.status != 0] # 如果有关键词 if", "# 如果有关键词 if keywords: # 添加关键字检索选项 filters.append(News.title.contains(keywords)) paginate = News.query.filter(*filters)\\", "1 total_page = 1 try: filters = list() # 如果有关键词", "保存到数据库 try: db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback() return", "1.2 尝试读取图片 if index_image: try: index_image = index_image.read() except Exception", "= 1 #查询数据 try: paginate = User.query.filter(User.is_admin == False)\\ .order_by(User.last_login.desc())\\", "current_app.logger.error(e) return jsonify(errno=RET.THIRDERR, errmsg=\"上传图片错误\") news.index_image_url = constants.QINIU_DOMIN_PREFIX + key #", "render_template(\"admin/news_review_detail.html\", data=data) # 执行审核操作 # 1. 获取参数 news_id = request.json.get(\"news_id\")", "page = int(page) except Exception as e: current_app.logger.error(e) page =", "categories_li = [] for category in categories: c_dict = category.to_dict()", "content, categery_id) news = None try: news = News.query.get(news_id) except", "as e: current_app.logger.error(e) page = 1 news_list = list() current_page", "e: current_app.logger.error(e) day_count = 0 try: day_begin = \"%d-%02d-%02d\" %", "try: print(page) page = int(page) except Exception as e: current_app.logger.error(e)", "current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") return jsonify(errno=RET.OK, errmsg=\"操作成功\") @admin_blu.route(\"/news_edit\", methods=[\"GET\",", "e: current_app.logger.error(e) page = 1 news_list = list() current_page =", "@admin_blu.route(\"/news_review_detail\", methods=[\"GET\", \"POST\"]) def news_review_detail(): \"\"\"新闻审核\"\"\" # 获取新闻id if request.method", "return jsonify(errno=RET.PARAMERR, 
errmsg=\"参数错误\") news.reason = reason news.status = -1 #", "content news.category_id = categery_id # 4. 保存到数据库 try: db.session.commit() except", "返回内容 data = { \"categories\": categories_dicts } return render_template(\"admin/news_type.html\", data=data)", "current_page, \"news_list\": news_dict_list } return render_template(\"admin/news_review.html\", data=data) @admin_blu.route(\"/news_review_detail\", methods=[\"GET\", \"POST\"])", "print(page) page = int(page) except Exception as e: current_app.logger.error(e) page", "data = { \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) categories", "digest, content, categery_id]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") print(title, digest, content, categery_id)", "from info import constants, db from info import redis_store from", "= Category.query.get(category_id) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\")", "flask import make_response from flask import redirect from flask import", "errmsg=\"保存数据失败\") return jsonify(errno=RET.OK, errmsg=\"操作成功\") @admin_blu.route(\"/news_edit\", methods=[\"GET\", \"POST\"]) def news_edit(): \"\"\"返回新闻列表\"\"\"", "return render_template(\"admin/news_review_detail.html\", data=data) # 执行审核操作 # 1. 
获取参数 news_id =", "\"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data) # 返回数据 data =", "jsonify(errno=RET.OK, errmsg=\"OK\") return render_template(\"admin/news_edit.html\", data=data) @admin_blu.route(\"/news_edit_detail\", methods=[\"GET\", \"POST\"]) def news_edit_detail():", "= \"\" try: page = int(page) except Exception as e:", "if keywords: # 添加关键字检索选项 filters.append(News.title.contains(keywords)) paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page,", "@admin_blu.route(\"/news_review\") def news_review(): \"\"\"返回待审核新闻列表\"\"\" page = request.args.get(\"p\", 1) keywords =", "user = User.query.filter(User.mobile == username).first() except Exception as e: current_app.logger.error(e)", "if action == \"accept\": news.status = 0 else: # 拒绝通过,需要获取原因", "if not all([news_id, action]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") if action not", "news_dict_list = list() for news in news_list: news_dict_list.append(news.to_basic_dict()) data =", "= list() for news in news_list: news_dict_list.append(news.to_review_dict()) data = {", "# 获取参数 page = request.args.get(\"p\", 1) try: print(page) page =", "\"\"\"返回待审核新闻列表\"\"\" page = request.args.get(\"p\", 1) keywords = request.args.get(\"keywords\", \"\") try:", "return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") # 5. 
返回结果 return jsonify(errno=RET.OK, errmsg=\"编辑成功\") @admin_blu.route(\"/news_category\")", "= User.query.filter(User.is_admin == False).count() except Exception as e: current_app.logger.error(e) #", "current_page, \"users\": users_list } return render_template(\"admin/user_list.html\", data=context) @admin_blu.route(\"/news_review\") def news_review():", "current_page = 1 total_page = 1 try: filters = list()", "render_template(\"admin/login.html\", errmsg=\"用户不是管理员\") session[\"user_id\"] = user.id session[\"nick_name\"] = user.nick_name session[\"mobile\"] =", "content = request.form.get(\"content\") index_image = request.form.get(\"index-image\") categery_id = request.form.get(\"category_id\") #", "# 判断是否有分类id if category_id: try: category = Category.query.get(category_id) except Exception", "import redis_store from info.lib.yuntongxun.sms import CCP from info.utils.captcha.captcha import captcha", "except Exception as e: current_app.error(e) news_dict_list = list() for news", "= 1 total_page = 1 try: filters = list() #", "data=data) @admin_blu.route(\"/user_list\") def user_list(): \"\"\"获取用户列表\"\"\" # 获取参数 page = request.args.get(\"p\",", "# 去session 中取到指定的值 user_id = session.get(\"user_id\", None) is_admin = session.get(\"is_admin\",", "= [] for category in categories: # 获取字典 cate_dict =", "import user_login_data from datetime import datetime, timedelta from . 
import", "as e: current_app.logger.error(e) active_count.append(count) active_date.reverse() active_count.reverse() data = {\"total_count\": total_count,", "news: return jsonify(errno=RET.NODATA, errmsg=\"未找到新闻数据\") # 1.2 尝试读取图片 if index_image: try:", "passport_blu from info.models import User, Category, News from info.modules.profile import", "= list() current_page = 1 total_page = 1 try: filters", "= request.form.get(\"index-image\") categery_id = request.form.get(\"category_id\") # 1.1 判断数据是否有值: if not", "user_id = session.get(\"user_id\", None) is_admin = session.get(\"is_admin\", False) if user_id", "errmsg=\"用户不是管理员\") session[\"user_id\"] = user.id session[\"nick_name\"] = user.nick_name session[\"mobile\"] = user.mobile", "news.status = -1 # 保存数据库 try: db.session.commit() except Exception as", "from info.modules.passport import passport_blu from info.models import User, Category, News", "paginate.page total_page = paginate.pages except Exception as e: current_app.logger.error(e) news_dict_list", "\"new_list\": news_dict_list, \"last_input\": b } if request.method == \"GET\": return", "return render_template(\"admin/login.html\") # 取到登陆的参数 username = request.form.get(\"username\") password = request.form.get(\"password\")", "in range(0, 31): begin_date = now_date - timedelta(days=i) end_date =", "except Exception as e: current_app.logger.error(e) # 将模型列表转换成字典列表 users_list = []", "\"current_page\": current_page, \"users\": users_list } return render_template(\"admin/user_list.html\", data=context) @admin_blu.route(\"/news_review\") def", "# 获取参数 news_id = request.args.get(\"news_id\") if not news_id: data =", "request.json.get(\"name\") print(category_name) if not category_name: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") # 判断是否有分类id", "jsonify(errno=RET.OK, errmsg=\"操作成功\") @admin_blu.route(\"/news_edit\", methods=[\"GET\", \"POST\"]) def news_edit(): \"\"\"返回新闻列表\"\"\" page =", "= None try: # 3. 
查询新闻 news = News.query.get(news_id) except", "print(category_name) if not category_name: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") # 判断是否有分类id if", "= \"%d-%02d-01\" % (now.tm_year, now.tm_mon) mon_begin_date = datetime.strptime(mon_begin, \"%Y-%m-%d\") mon_count", "== \"GET\": # 去session 中取到指定的值 user_id = session.get(\"user_id\", None) is_admin", "e: current_app.logger.error(e) # 查询月新增数 mon_count = 0 try: now =", "except Exception as e: current_app.logger.error(e) return render_template(\"admin/login.html\", errmsg=\"数据错误\") if not", "current_app, jsonify from flask import g from flask import make_response", "from info import redis_store from info.lib.yuntongxun.sms import CCP from info.utils.captcha.captcha", "as e: current_app.logger.error(e) # 将模型列表转换成字典列表 users_list = [] for user", "all([username, password]): return render_template(\"admin/login.html\", errmsg=\"参数错误\") try: user = User.query.filter(User.mobile ==", "#查询数据 try: paginate = User.query.filter(User.is_admin == False)\\ .order_by(User.last_login.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT,", "= [] current_page = 1 total_page = 1 #查询数据 try:", "total_page, \"current_page\": current_page, \"news_list\": news_dict_list } return render_template(\"admin/news_review.html\", data=data) @admin_blu.route(\"/news_review_detail\",", "= re.findall(r\"\"\"keywords=(\\w*)\"\"\", page) print(b) page = a.group() if b !=", "try: category = Category.query.get(category_id) except Exception as e: current_app.logger.error(e) return", "news_list = paginate.items current_page = paginate.page total_page = paginate.pages except", "user_id or not is_admin: # 判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页 return redirect(\"/\") @admin_blu.route(\"/user_count\") def", "= 1 try: filters = list() # 如果有关键词 if keywords:", "all([title, digest, content, categery_id]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") print(title, digest, content,", "data = { \"categories\": categories_dicts } return 
render_template(\"admin/news_type.html\", data=data) @admin_blu.route(\"/add_category\",", "} return render_template(\"admin/news_edit_detail.html\", data=data) news_id = request.form.get(\"news_id\") title = request.form.get(\"title\")", "\"news_list\": news_dict_list } return render_template(\"admin/news_review.html\", data=data) @admin_blu.route(\"/news_review_detail\", methods=[\"GET\", \"POST\"]) def", "User.query.filter(User.is_admin==False, User.create_time > mon_begin_date).count() except Exception as e: current_app.logger.error(e) day_count", "RET from info.modules.passport import passport_blu from info.models import User, Category,", "render_template(\"admin/login.html\", errmsg=\"数据错误\") if not user: return render_template(\"admin/login.html\", errmsg=\"用户名错误\") if not", "e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") return jsonify(errno=RET.OK, errmsg=\"操作成功\") @admin_blu.route(\"/news_edit\",", "= b[0] keywords = b else: keywords = None b", "\"\") try: page = int(page) except Exception as e: current_app.logger.error(e)", ".order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list = paginate.items current_page = paginate.page", "return render_template(\"admin/login.html\", errmsg=\"数据错误\") if not user: return render_template(\"admin/login.html\", errmsg=\"用户名错误\") if", "Exception as e: current_app.logger.error(e) # 将模型列表转换成字典列表 users_list = [] for", "= -1 # 保存数据库 try: db.session.commit() except Exception as e:", "keywords = b else: keywords = None b = \"\"", "if keywords: # 添加关键词的检索选项 filters.append(News.title.contains(keywords)) # 查询 paginate = News.query.filter(*filters)\\", "0] # 如果有关键词 if keywords: # 添加关键字检索选项 filters.append(News.title.contains(keywords)) paginate =", "redirect from flask import render_template from flask import request from", "== False, User.last_login >= begin_date, User.last_login < end_date).count() print(count) except", 
"request.form.get(\"digest\") content = request.form.get(\"content\") index_image = request.form.get(\"index-image\") categery_id = request.form.get(\"category_id\")", "return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") print(title, digest, content, categery_id) news = None", "{\"total_count\": total_count, \"mon_count\": mon_count, \"day_count\": day_count, \"active_date\": active_date, \"active_count\": active_count}", "添加关键词的检索选项 filters.append(News.title.contains(keywords)) # 查询 paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT,", "info.modules.passport import passport_blu from info.models import User, Category, News from", "for news in news_list: news_dict_list.append(news.to_basic_dict()) data = { \"total_page\": total_page,", "flask import request import random import re from flask import", "= request.form.get(\"news_id\") title = request.form.get(\"title\") digest= request.form.get(\"digest\") content = request.form.get(\"content\")", "action not in (\"accept\", \"reject\"): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news =", "Exception as e: current_app.logger.error(e) if not news: return jsonify(errno=RET.NODATA, errmsg=\"未查询到数据\")", "= category_name db.session.add(new_category) db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback()", "= 0 try: now = time.localtime() mon_begin = \"%d-%02d-01\" %", "Exception as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") return jsonify(errno=RET.OK,", "methods=[\"GET\", \"POST\"]) def news_review_detail(): \"\"\"新闻审核\"\"\" # 获取新闻id if request.method ==", "None b = \"\" try: page = int(page) except Exception", "session.get(\"is_admin\", False) if user_id and is_admin: return redirect(url_for(\"admin_index\")) return render_template(\"admin/login.html\")", "news_id = request.args.get(\"news_id\") if not news_id: data = { \"errmsg\":", "# 
5. 返回结果 return jsonify(errno=RET.OK, errmsg=\"编辑成功\") @admin_blu.route(\"/news_category\") def get_news_category(): #", "reason news.status = -1 # 保存数据库 try: db.session.commit() except Exception", "if not news_id: data = { \"errmsg\": \"没有找到新闻\" } return", "= { \"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data) # 通过id查询新闻", "return render_template(\"admin/news_edit.html\", data=data) @admin_blu.route(\"/news_edit_detail\", methods=[\"GET\", \"POST\"]) def news_edit_detail(): \"\"\"新闻编辑详情\"\"\" if", "news.digest = digest news.content = content news.category_id = categery_id #", "\"%Y-%m-%d\") print(now_date) # 定义空数组,保存数据 active_date = list() active_count = list()", "category_name: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") # 判断是否有分类id if category_id: try: category", "jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news.reason = reason news.status = -1 # 保存数据库", "and is_admin: return redirect(url_for(\"admin_index\")) return render_template(\"admin/login.html\") # 取到登陆的参数 username =", "categories: # 获取字典 cate_dict = category.to_dict() # 拼接内容 categories_dicts.append(cate_dict) categories_dicts.pop(0)", "\"categories\": categories_dicts } return render_template(\"admin/news_type.html\", data=data) @admin_blu.route(\"/add_category\", methods=[\"POST\"]) def add_category():", "= { \"news\": news.to_dict(), \"categories\": categories_li } return render_template(\"admin/news_edit_detail.html\", data=data)", "= category_name return jsonify(errno=RET.OK, errmsg=\"保存数据成功\") else: # 如果没有分类id, 添加分类 try:", "获取新闻id if request.method == \"GET\": news_id = request.args.get(\"news_id\") if not", "jsonify(errno=RET.NODATA, errmsg=\"未查询到数据\") if action == \"accept\": news.status = 0 else:", "= 0 try: day_begin = \"%d-%02d-%02d\" % (now.tm_year, now.tm_mon, now.tm_mday)", "return render_template(\"admin/news_type.html\", data=data) @admin_blu.route(\"/add_category\", methods=[\"POST\"]) def add_category(): \"\"\"修改或者添加分类\"\"\" 
category_id =", "news.to_dict() } return render_template(\"admin/news_review_detail.html\", data=data) # 执行审核操作 # 1. 获取参数", "paginate.items current_page = paginate.page total_page = paginate.pages except Exception as", "news: data = { \"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data)", "render_template(\"admin/news_review_detail.html\", data=data) # 返回数据 data = { \"news\": news.to_dict() }", "re.match(r\"^\\d*\", page) b = re.findall(r\"\"\"keywords=(\\w*)\"\"\", page) print(b) page = a.group()", "category in categories: c_dict = category.to_dict() c_dict[\"is_selected\"] = False if", "not news: return jsonify(errno=RET.NODATA, errmsg=\"未找到新闻数据\") # 1.2 尝试读取图片 if index_image:", "return render_template(\"admin/user_count.html\", data=data) @admin_blu.route(\"/user_list\") def user_list(): \"\"\"获取用户列表\"\"\" # 获取参数 page", "active_date, \"active_count\": active_count} return render_template(\"admin/user_count.html\", data=data) @admin_blu.route(\"/user_list\") def user_list(): \"\"\"获取用户列表\"\"\"", "int(page) except Exception as e: current_app.logger.error(e) page = 1 news_list", "mon_count = User.query.filter(User.is_admin==False, User.create_time > mon_begin_date).count() except Exception as e:", "total_page = paginate.pages except Exception as e: current_app.logger.error(e) news_dict_list =", "data=data) # return jsonify(errno=RET.OK, errmsg=\"OK\") return render_template(\"admin/news_edit.html\", data=data) @admin_blu.route(\"/news_edit_detail\", methods=[\"GET\",", "= 0 else: # 拒绝通过,需要获取原因 reason = request.json.get(\"reason\") if not", "== News.category_id: c_dict[\"is_selected\"] = True categories_li.append(c_dict) # 移除最新分类 categories_li.pop(0) data", "session[\"nick_name\"] = user.nick_name session[\"mobile\"] = user.mobile session[\"is_admin\"] = True #", "= True # 跳转到后台管理主页,暂未实现 return redirect(url_for(\"admin.admin_index\")) @admin_blu.route(\"/index\") @user_login_data def admin_index():", 
"request.json.get(\"news_id\") action = request.json.get(\"action\") #2. 判断参数 if not all([news_id, action]):", "paginate.page total_page = paginate.pages except Exception as e: current_app.logger.error(e) #", "session from flask import url_for import time from info import", "e: current_app.logger.error(e) if not news: data = { \"errmsg\": \"没有找到新闻\"", "else: # 如果没有分类id, 添加分类 try: new_category = Category() new_category.id =", "= request.form.get(\"password\") if not all([username, password]): return render_template(\"admin/login.html\", errmsg=\"参数错误\") try:", "now_date - timedelta(days=i) end_date = now_date - timedelta(days=(i - 1))", "\"categories\": categories_li } return render_template(\"admin/news_edit_detail.html\", data=data) news_id = request.form.get(\"news_id\") title", "def news_review_detail(): \"\"\"新闻审核\"\"\" # 获取新闻id if request.method == \"GET\": news_id", "info.utils.common import user_login_data from datetime import datetime, timedelta from .", "errmsg=\"OK\") return render_template(\"admin/news_edit.html\", data=data) @admin_blu.route(\"/news_edit_detail\", methods=[\"GET\", \"POST\"]) def news_edit_detail(): \"\"\"新闻编辑详情\"\"\"", "category_name = request.json.get(\"name\") print(category_name) if not category_name: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")", "categories_dicts } return render_template(\"admin/news_type.html\", data=data) @admin_blu.route(\"/add_category\", methods=[\"POST\"]) def add_category(): \"\"\"修改或者添加分类\"\"\"", "= { \"total_page\": total_page, \"current_page\": current_page, \"users\": users_list } return", "new_category = Category() new_category.id = category_id new_category.name = category_name db.session.add(new_category)", "request.form.get(\"password\") if not all([username, password]): return render_template(\"admin/login.html\", errmsg=\"参数错误\") try: user", "user_login_data from datetime import datetime, timedelta from . 
import admin_blu", "= request.form.get(\"title\") digest= request.form.get(\"digest\") content = request.form.get(\"content\") index_image = request.form.get(\"index-image\")", "= { \"total_page\": total_page, \"current_page\": current_page, \"news_list\": news_dict_list } return", "判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页 return redirect(\"/\") @admin_blu.route(\"/user_count\") def user_count(): # 查询总人数 total_count =", "# 3. 设置相关数据 news.title = title news.digest = digest news.content", "通过id查询新闻 news = None try: news = News.query.get(news_id) except Exception", "= Category() new_category.id = category_id new_category.name = category_name db.session.add(new_category) db.session.commit()", "} return render_template(\"admin/news_review_detail.html\", data=data) # 通过id查询新闻 news = None try:", "= storage(index_image) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.THIRDERR, errmsg=\"上传图片错误\")", "reason: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news.reason = reason news.status = -1", "except Exception as e: current_app.logger.error(e) active_count.append(count) active_date.reverse() active_count.reverse() data =", "= category.to_dict() c_dict[\"is_selected\"] = False if category.id == News.category_id: c_dict[\"is_selected\"]", "= User.query.filter(User.is_admin==False, User.create_time > mon_begin_date).count() except Exception as e: current_app.logger.error(e)", "from info.models import User, Category, News from info.modules.profile import profile_blu", "active_date.reverse() active_count.reverse() data = {\"total_count\": total_count, \"mon_count\": mon_count, \"day_count\": day_count,", "total_page = 1 #查询数据 try: paginate = User.query.filter(User.is_admin == False)\\", "news_id: data = { \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data)", "import session from flask import url_for import time from info", "news_dict_list.append(news.to_review_dict()) data = { \"total_page\": total_page, 
\"current_page\": current_page, \"news_list\": news_dict_list", "title = request.form.get(\"title\") digest= request.form.get(\"digest\") content = request.form.get(\"content\") index_image =", "Exception as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") # 5.", "User.last_login >= begin_date, User.last_login < end_date).count() print(count) except Exception as", "= paginate.page total_page = paginate.pages except Exception as e: current_app.error(e)", "@admin_blu.route(\"/user_count\") def user_count(): # 查询总人数 total_count = 0 try: total_count", "print(count) except Exception as e: current_app.logger.error(e) active_count.append(count) active_date.reverse() active_count.reverse() data", "False if category.id == News.category_id: c_dict[\"is_selected\"] = True categories_li.append(c_dict) #", "new_category.id = category_id new_category.name = category_name db.session.add(new_category) db.session.commit() except Exception", "= a.group() if b != []: b = b[0] keywords", "(now.tm_year, now.tm_mon) mon_begin_date = datetime.strptime(mon_begin, \"%Y-%m-%d\") mon_count = User.query.filter(User.is_admin==False, User.create_time", "news_list: news_dict_list.append(news.to_review_dict()) data = { \"total_page\": total_page, \"current_page\": current_page, \"news_list\":", "try: user = User.query.filter(User.mobile == username).first() except Exception as e:", "{ \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) categories = Category.query.all()", "try: day_begin = \"%d-%02d-%02d\" % (now.tm_year, now.tm_mon, now.tm_mday) day_begin_date =", "1) keywords = request.args.get(\"keywords\", \"\") try: page = int(page) except", "categories_dicts = [] for category in categories: # 获取字典 cate_dict", "url_for import time from info import constants, db from info", "request.args.get(\"p\", 1) try: print(page) page = int(page) except Exception as", "current_app.logger.error(e) news_dict_list 
= list() for news in news_list: news_dict_list.append(news.to_basic_dict()) data", "digest news.content = content news.category_id = categery_id # 4. 保存到数据库", "def get_news_category(): # 获取所有的分类数据 categories = Category.query.all() # 定义列表保存分类数据 categories_dicts", "news.content = content news.category_id = categery_id # 4. 保存到数据库 try:", "try: filters = [News.status != 0] # 如果有关键词 if keywords:", "page = a.group() if b != []: b = b[0]", "设置相关数据 news.title = title news.digest = digest news.content = content", "返回数据 data = { \"news\": news.to_dict() } return render_template(\"admin/news_review_detail.html\", data=data)", "获取参数 news_id = request.args.get(\"news_id\") if not news_id: data = {", "request.method == \"GET\": return render_template(\"admin/news_edit.html\", data=data) # return jsonify(errno=RET.OK, errmsg=\"OK\")", "info.modules.profile import profile_blu from info.utils.common import user_login_data from datetime import", "not user.is_admin: return render_template(\"admin/login.html\", errmsg=\"用户不是管理员\") session[\"user_id\"] = user.id session[\"nick_name\"] =", "total_page, \"current_page\": current_page, \"users\": users_list } return render_template(\"admin/user_list.html\", data=context) @admin_blu.route(\"/news_review\")", "= [News.status != 0] # 如果有关键词 if keywords: # 添加关键字检索选项", "news_review(): \"\"\"返回待审核新闻列表\"\"\" page = request.args.get(\"p\", 1) keywords = request.args.get(\"keywords\", \"\")", "return redirect(\"/\") @admin_blu.route(\"/user_count\") def user_count(): # 查询总人数 total_count = 0", "news in news_list: news_dict_list.append(news.to_review_dict()) data = { \"total_page\": total_page, \"current_page\":", "Exception as e: current_app.logger.error(e) if not news: data = {", "active_count = list() # 依次添加数据,再反转 for i in range(0, 31):", "request.json.get(\"action\") #2. 
判断参数 if not all([news_id, action]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")", "filters.append(News.title.contains(keywords)) # 查询 paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)", "# 查询总人数 total_count = 0 try: total_count = User.query.filter(User.is_admin ==", "% (now.tm_year, now.tm_mon, now.tm_mday) day_begin_date = datetime.strptime(day_begin, \"%Y-%m-%d\") day_count =", "g.user return render_template(\"admin/index.html\", user=user.to_dict()) @admin_blu.before_request def before_request(): # 判断如果不是登陆页面的请求 if", "info.utils.response_code import RET from info.modules.passport import passport_blu from info.models import", "e: current_app.logger.error(e) news_dict_list = list() for news in news_list: news_dict_list.append(news.to_basic_dict())", "return render_template(\"admin/login.html\", errmsg=\"参数错误\") try: user = User.query.filter(User.mobile == username).first() except", "current_app.logger.error(e) if not news: data = { \"errmsg\": \"没有找到新闻\" }", "查询新闻 news = None try: news = News.query.get(news_id) except Exception", "from info.lib.yuntongxun.sms import CCP from info.utils.captcha.captcha import captcha from info.utils.image_storage", "= User.query.filter(User.mobile == username).first() except Exception as e: current_app.logger.error(e) return", "errmsg=\"用户名错误\") if not user.check_password(password): return render_template(\"admin/login.html\", errmsg=\"密码错误\") if not user.is_admin:", "data=data) categories = Category.query.all() categories_li = [] for category in", "news_list = list() current_page = 1 total_page = 1 try:", "= { \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) # 查询新闻", "get_news_category(): # 获取所有的分类数据 categories = Category.query.all() # 定义列表保存分类数据 categories_dicts =", "content, categery_id]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") print(title, digest, content, categery_id) news", 
"import User, Category, News from info.modules.profile import profile_blu from info.utils.common", "digest= request.form.get(\"digest\") content = request.form.get(\"content\") index_image = request.form.get(\"index-image\") categery_id =", "day_count = 0 try: day_begin = \"%d-%02d-%02d\" % (now.tm_year, now.tm_mon,", "from flask import current_app, jsonify from flask import g from", "[News.status != 0] # 如果有关键词 if keywords: # 添加关键字检索选项 filters.append(News.title.contains(keywords))", "errmsg=\"查询数据失败\") if not category: return jsonify(errno=RET.NODATA, errmsg=\"未查询到分类信息\") category.name = category_name", "if not all([username, password]): return render_template(\"admin/login.html\", errmsg=\"参数错误\") try: user =", "data=data) # 通过id查询新闻 news = None try: news = News.query.get(news_id)", "Exception as e: current_app.logger.error(e) news_dict_list = list() for news in", "# 返回内容 data = { \"categories\": categories_dicts } return render_template(\"admin/news_type.html\",", "data=data) @admin_blu.route(\"/news_edit_detail\", methods=[\"GET\", \"POST\"]) def news_edit_detail(): \"\"\"新闻编辑详情\"\"\" if request.method ==", "as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\") if not category: return", "拼接内容 categories_dicts.append(cate_dict) categories_dicts.pop(0) # 返回内容 data = { \"categories\": categories_dicts", "\"last_input\": b } if request.method == \"GET\": return render_template(\"admin/news_edit.html\", data=data)", "jsonify(errno=RET.NODATA, errmsg=\"未找到新闻数据\") # 1.2 尝试读取图片 if index_image: try: index_image =", "# 如果没有分类id, 添加分类 try: new_category = Category() new_category.id = category_id", "# 判断如果不是登陆页面的请求 if not request.url.endswith(url_for(\"admin.admin_login\")): user_id = session.get(\"user_id\") is_admin =", "session.get(\"user_id\", None) is_admin = session.get(\"is_admin\", False) if user_id and is_admin:", "paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, 
constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list = paginate.items", "datetime import datetime, timedelta from . import admin_blu @admin_blu.route(\"/login\", methods=[\"GET\",", "# 跳转到后台管理主页,暂未实现 return redirect(url_for(\"admin.admin_index\")) @admin_blu.route(\"/index\") @user_login_data def admin_index(): user =", "= list() active_count = list() # 依次添加数据,再反转 for i in", "methods=[\"GET\", \"POST\"]) def admin_login(): if request.method == \"GET\": # 去session", "= category_id new_category.name = category_name db.session.add(new_category) db.session.commit() except Exception as", "取到登陆的参数 username = request.form.get(\"username\") password = request.form.get(\"password\") if not all([username,", "= 1 total_page = 1 #查询数据 try: paginate = User.query.filter(User.is_admin", "import constants, db from info import redis_store from info.lib.yuntongxun.sms import", "day_begin_date = datetime.strptime(day_begin, \"%Y-%m-%d\") day_count = User.query.filter(User.is_admin==False, User.create_time >= day_begin_date).count()", "errmsg=\"数据错误\") if not user: return render_template(\"admin/login.html\", errmsg=\"用户名错误\") if not user.check_password(password):", "if not request.url.endswith(url_for(\"admin.admin_login\")): user_id = session.get(\"user_id\") is_admin = session.get(\"is_admin\", False)", "action]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") if action not in (\"accept\", \"reject\"):", "day_count = User.query.filter(User.is_admin==False, User.create_time >= day_begin_date).count() except Exception as e:", "查询新闻 news = News.query.get(news_id) except Exception as e: current_app.logger.error(e) if", "False) if not user_id or not is_admin: # 判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页 return", "as e: return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") # 2. 
将标题图片上传到七牛 try: key", "admin_index(): user = g.user return render_template(\"admin/index.html\", user=user.to_dict()) @admin_blu.before_request def before_request():", "if not reason: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news.reason = reason news.status", "= Category.query.all() categories_li = [] for category in categories: c_dict", "errmsg=\"参数错误\") # 判断是否有分类id if category_id: try: category = Category.query.get(category_id) except", "= time.localtime() mon_begin = \"%d-%02d-01\" % (now.tm_year, now.tm_mon) mon_begin_date =", "1 news_list = list() current_page = 1 total_page = 1", "Exception as e: current_app.logger.error(e) # 查询图表信息 # 获取到当天00:00:00时间 now_date =", "e: current_app.logger.error(e) if not news: data = { \"errmsg\": \"未查询到数据\"", "active_date = list() active_count = list() # 依次添加数据,再反转 for i", "@admin_blu.route(\"/user_list\") def user_list(): \"\"\"获取用户列表\"\"\" # 获取参数 page = request.args.get(\"p\", 1)", "@admin_blu.route(\"/index\") @user_login_data def admin_index(): user = g.user return render_template(\"admin/index.html\", user=user.to_dict())", "datetime.strptime(mon_begin, \"%Y-%m-%d\") mon_count = User.query.filter(User.is_admin==False, User.create_time > mon_begin_date).count() except Exception", "from flask import request import random import re from flask", "from . 
import admin_blu @admin_blu.route(\"/login\", methods=[\"GET\", \"POST\"]) def admin_login(): if", "User.query.filter(User.is_admin == False).count() except Exception as e: current_app.logger.error(e) # 查询月新增数", "= Category.query.all() # 定义列表保存分类数据 categories_dicts = [] for category in", "User.query.filter(User.is_admin == False, User.last_login >= begin_date, User.last_login < end_date).count() print(count)", "@admin_blu.route(\"/add_category\", methods=[\"POST\"]) def add_category(): \"\"\"修改或者添加分类\"\"\" category_id = request.json.get(\"id\") category_name =", "info.utils.image_storage import storage from info.utils.response_code import RET from info.modules.passport import", "index_image = index_image.read() except Exception as e: return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\")", "render_template(\"admin/news_edit_detail.html\", data=data) # 查询新闻 news = None try: news =", "user=user.to_dict()) @admin_blu.before_request def before_request(): # 判断如果不是登陆页面的请求 if not request.url.endswith(url_for(\"admin.admin_login\")): user_id", "= { \"total_page\": total_page, \"current_page\": current_page, \"new_list\": news_dict_list, \"last_input\": b", "current_app.logger.error(e) active_count.append(count) active_date.reverse() active_count.reverse() data = {\"total_count\": total_count, \"mon_count\": mon_count,", "e: current_app.logger.error(e) # 将模型列表转换成字典列表 users_list = [] for user in", "# 取到登陆的参数 username = request.form.get(\"username\") password = request.form.get(\"password\") if not", "news.title = title news.digest = digest news.content = content news.category_id", "if user_id and is_admin: return redirect(url_for(\"admin_index\")) return render_template(\"admin/login.html\") # 取到登陆的参数", "errmsg=\"未找到新闻数据\") # 1.2 尝试读取图片 if index_image: try: index_image = index_image.read()", "return jsonify(errno=RET.OK, errmsg=\"编辑成功\") @admin_blu.route(\"/news_category\") def get_news_category(): # 获取所有的分类数据 categories =", "1 try: filters = list() # 如果有关键词 if keywords: #", 
"count = User.query.filter(User.is_admin == False, User.last_login >= begin_date, User.last_login <", "session[\"mobile\"] = user.mobile session[\"is_admin\"] = True # 跳转到后台管理主页,暂未实现 return redirect(url_for(\"admin.admin_index\"))", "categories_li.append(c_dict) # 移除最新分类 categories_li.pop(0) data = { \"news\": news.to_dict(), \"categories\":", "news = None try: # 3. 查询新闻 news = News.query.get(news_id)", "mon_begin_date).count() except Exception as e: current_app.logger.error(e) day_count = 0 try:", "data=data) # 返回数据 data = { \"news\": news.to_dict() } return", "day_begin_date).count() except Exception as e: current_app.logger.error(e) # 查询图表信息 # 获取到当天00:00:00时间", "False) if user_id and is_admin: return redirect(url_for(\"admin_index\")) return render_template(\"admin/login.html\") #", "mon_begin_date = datetime.strptime(mon_begin, \"%Y-%m-%d\") mon_count = User.query.filter(User.is_admin==False, User.create_time > mon_begin_date).count()", "Category.query.all() categories_li = [] for category in categories: c_dict =", "# 3. 查询新闻 news = News.query.get(news_id) except Exception as e:", "info import constants, db from info import redis_store from info.lib.yuntongxun.sms", "return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") # 2. 将标题图片上传到七牛 try: key = storage(index_image)", "获取参数 page = request.args.get(\"p\", 1) try: print(page) page = int(page)", "判断参数 if not all([news_id, action]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") if action", "flask import redirect from flask import render_template from flask import", "\"%Y-%m-%d\") mon_count = User.query.filter(User.is_admin==False, User.create_time > mon_begin_date).count() except Exception as", "e: current_app.logger.error(e) page = 1 # 设置变量默认值 users = []", "return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news = None try: # 3. 
查询新闻", "\"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) categories = Category.query.all() categories_li =", "# 通过id查询新闻 news = None try: news = News.query.get(news_id) except", "jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") # 2. 将标题图片上传到七牛 try: key = storage(index_image) except", "list() current_page = 1 total_page = 1 try: filters =", "total_count, \"mon_count\": mon_count, \"day_count\": day_count, \"active_date\": active_date, \"active_count\": active_count} return", "\"reject\"): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news = None try: # 3.", "not category_name: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") # 判断是否有分类id if category_id: try:", "not user: return render_template(\"admin/login.html\", errmsg=\"用户名错误\") if not user.check_password(password): return render_template(\"admin/login.html\",", "1 total_page = 1 #查询数据 try: paginate = User.query.filter(User.is_admin ==", "3. 查询新闻 news = News.query.get(news_id) except Exception as e: current_app.logger.error(e)", "import render_template from flask import request from flask import session", "category_id: try: category = Category.query.get(category_id) except Exception as e: current_app.logger.error(e)", "{ \"total_page\": total_page, \"current_page\": current_page, \"users\": users_list } return render_template(\"admin/user_list.html\",", "False) news_list = paginate.items current_page = paginate.page total_page = paginate.pages", "Exception as e: current_app.error(e) news_dict_list = list() for news in", "render_template(\"admin/index.html\", user=user.to_dict()) @admin_blu.before_request def before_request(): # 判断如果不是登陆页面的请求 if not request.url.endswith(url_for(\"admin.admin_login\")):", "def add_category(): \"\"\"修改或者添加分类\"\"\" category_id = request.json.get(\"id\") category_name = request.json.get(\"name\") print(category_name)", "news.to_dict(), \"categories\": categories_li } return render_template(\"admin/news_edit_detail.html\", data=data) news_id = 
request.form.get(\"news_id\")", "= request.form.get(\"category_id\") # 1.1 判断数据是否有值: if not all([title, digest, content,", "user_list(): \"\"\"获取用户列表\"\"\" # 获取参数 page = request.args.get(\"p\", 1) try: print(page)", "# 定义列表保存分类数据 categories_dicts = [] for category in categories: #", "= request.form.get(\"username\") password = request.form.get(\"password\") if not all([username, password]): return", "def before_request(): # 判断如果不是登陆页面的请求 if not request.url.endswith(url_for(\"admin.admin_login\")): user_id = session.get(\"user_id\")", "end_date).count() print(count) except Exception as e: current_app.logger.error(e) active_count.append(count) active_date.reverse() active_count.reverse()", "== \"GET\": news_id = request.args.get(\"news_id\") if not news_id: data =", "count = 0 try: count = User.query.filter(User.is_admin == False, User.last_login", "request.method == \"GET\": # 去session 中取到指定的值 user_id = session.get(\"user_id\", None)", "password]): return render_template(\"admin/login.html\", errmsg=\"参数错误\") try: user = User.query.filter(User.mobile == username).first()", "= list() # 依次添加数据,再反转 for i in range(0, 31): begin_date", "methods=[\"GET\", \"POST\"]) def news_edit_detail(): \"\"\"新闻编辑详情\"\"\" if request.method == \"GET\": #", "= request.args.get(\"news_id\") if not news_id: data = { \"errmsg\": \"没有找到新闻\"", "c_dict = category.to_dict() c_dict[\"is_selected\"] = False if category.id == News.category_id:", "flask import g from flask import make_response from flask import", "= 0 try: total_count = User.query.filter(User.is_admin == False).count() except Exception", "# 查询 paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list", "# 添加关键词的检索选项 filters.append(News.title.contains(keywords)) # 查询 paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page,", "timedelta(days=(i - 1)) active_date.append(begin_date.strftime(\"%Y-%m-%d\")) count 
= 0 try: count =", "Exception as e: current_app.logger.error(e) active_count.append(count) active_date.reverse() active_count.reverse() data = {\"total_count\":", "Category.query.all() # 定义列表保存分类数据 categories_dicts = [] for category in categories:", "return render_template(\"admin/news_review_detail.html\", data=data) # 返回数据 data = { \"news\": news.to_dict()", "# 拼接内容 categories_dicts.append(cate_dict) categories_dicts.pop(0) # 返回内容 data = { \"categories\":", "timedelta(days=i) end_date = now_date - timedelta(days=(i - 1)) active_date.append(begin_date.strftime(\"%Y-%m-%d\")) count", "\"mon_count\": mon_count, \"day_count\": day_count, \"active_date\": active_date, \"active_count\": active_count} return render_template(\"admin/user_count.html\",", "@admin_blu.route(\"/login\", methods=[\"GET\", \"POST\"]) def admin_login(): if request.method == \"GET\": #", "= 1 try: filters = [News.status != 0] # 如果有关键词", "e: current_app.logger.error(e) if not news: return jsonify(errno=RET.NODATA, errmsg=\"未找到新闻数据\") # 1.2", "Exception as e: current_app.logger.error(e) return render_template(\"admin/login.html\", errmsg=\"数据错误\") if not user:", "import storage from info.utils.response_code import RET from info.modules.passport import passport_blu", "constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list = paginate.items current_page = paginate.page total_page =", "if index_image: try: index_image = index_image.read() except Exception as e:", "if not user: return render_template(\"admin/login.html\", errmsg=\"用户名错误\") if not user.check_password(password): return", "jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") return jsonify(errno=RET.OK, errmsg=\"操作成功\") @admin_blu.route(\"/news_edit\", methods=[\"GET\", \"POST\"]) def news_edit():", "before_request(): # 判断如果不是登陆页面的请求 if not request.url.endswith(url_for(\"admin.admin_login\")): user_id = session.get(\"user_id\") is_admin", "current_app.logger.error(e) page = 1 # 设置变量默认值 users = [] current_page", "news.reason = reason news.status = 
-1 # 保存数据库 try: db.session.commit()", "if not news: data = { \"errmsg\": \"没有找到新闻\" } return", "categery_id) news = None try: news = News.query.get(news_id) except Exception", "[] for user in users: users_list.append(user.to_admin_dict()) context = { \"total_page\":", "data=data) # 执行审核操作 # 1. 获取参数 news_id = request.json.get(\"news_id\") action", "0 try: total_count = User.query.filter(User.is_admin == False).count() except Exception as", "paginate.page total_page = paginate.pages except Exception as e: current_app.error(e) news_dict_list", "None try: # 3. 查询新闻 news = News.query.get(news_id) except Exception", "= request.args.get(\"keywords\", \"\") try: page = int(page) except Exception as", "from flask import g from flask import make_response from flask", "# 获取字典 cate_dict = category.to_dict() # 拼接内容 categories_dicts.append(cate_dict) categories_dicts.pop(0) #", "{ \"total_page\": total_page, \"current_page\": current_page, \"news_list\": news_dict_list } return render_template(\"admin/news_review.html\",", "= request.json.get(\"name\") print(category_name) if not category_name: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") #", "except Exception as e: current_app.logger.error(e) if not news: data =", "> mon_begin_date).count() except Exception as e: current_app.logger.error(e) day_count = 0", "1 #查询数据 try: paginate = User.query.filter(User.is_admin == False)\\ .order_by(User.last_login.desc())\\ .paginate(page,", "return jsonify(errno=RET.NODATA, errmsg=\"未找到新闻数据\") # 1.2 尝试读取图片 if index_image: try: index_image", "e: current_app.logger.error(e) active_count.append(count) active_date.reverse() active_count.reverse() data = {\"total_count\": total_count, \"mon_count\":", "import redirect from flask import render_template from flask import request", "def news_review(): \"\"\"返回待审核新闻列表\"\"\" page = request.args.get(\"p\", 1) keywords = request.args.get(\"keywords\",", "db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback() 
return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\")", "= digest news.content = content news.category_id = categery_id # 4.", "filters = [News.status != 0] # 如果有关键词 if keywords: #", ">= begin_date, User.last_login < end_date).count() print(count) except Exception as e:", "current_app.logger.error(e) if not news: return jsonify(errno=RET.NODATA, errmsg=\"未查询到数据\") if action ==", "news_id = request.form.get(\"news_id\") title = request.form.get(\"title\") digest= request.form.get(\"digest\") content =", "= content news.category_id = categery_id # 4. 保存到数据库 try: db.session.commit()", "current_app.logger.error(e) if not news: data = { \"errmsg\": \"未查询到数据\" }", "jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") # 判断是否有分类id if category_id: try: category = Category.query.get(category_id)", "} return render_template(\"admin/news_review_detail.html\", data=data) # 返回数据 data = { \"news\":", "\"%d-%02d-01\" % (now.tm_year, now.tm_mon) mon_begin_date = datetime.strptime(mon_begin, \"%Y-%m-%d\") mon_count =", "\"\"\"获取用户列表\"\"\" # 获取参数 page = request.args.get(\"p\", 1) try: print(page) page", "# 判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页 return redirect(\"/\") @admin_blu.route(\"/user_count\") def user_count(): # 查询总人数 total_count", "errmsg=\"参数错误\") news.reason = reason news.status = -1 # 保存数据库 try:", "current_page, \"new_list\": news_dict_list, \"last_input\": b } if request.method == \"GET\":", "except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\") if not", "active_count.append(count) active_date.reverse() active_count.reverse() data = {\"total_count\": total_count, \"mon_count\": mon_count, \"day_count\":", "import request import random import re from flask import current_app,", "time.localtime() mon_begin = \"%d-%02d-01\" % (now.tm_year, now.tm_mon) mon_begin_date = datetime.strptime(mon_begin,", "\"total_page\": total_page, \"current_page\": current_page, \"news_list\": news_dict_list } return render_template(\"admin/news_review.html\", 
data=data)", "i in range(0, 31): begin_date = now_date - timedelta(days=i) end_date", "import RET from info.modules.passport import passport_blu from info.models import User,", "定义空数组,保存数据 active_date = list() active_count = list() # 依次添加数据,再反转 for", "for news in news_list: news_dict_list.append(news.to_review_dict()) data = { \"total_page\": total_page,", "db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") # 5. 返回结果 return jsonify(errno=RET.OK, errmsg=\"编辑成功\")", "data = { \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) #", "flask import render_template from flask import request from flask import", "# 依次添加数据,再反转 for i in range(0, 31): begin_date = now_date", "category.id == News.category_id: c_dict[\"is_selected\"] = True categories_li.append(c_dict) # 移除最新分类 categories_li.pop(0)", "render_template(\"admin/user_list.html\", data=context) @admin_blu.route(\"/news_review\") def news_review(): \"\"\"返回待审核新闻列表\"\"\" page = request.args.get(\"p\", 1)", "request.method == \"GET\": # 获取参数 news_id = request.args.get(\"news_id\") if not", "import g from flask import make_response from flask import redirect", "paginate.pages except Exception as e: current_app.logger.error(e) news_dict_list = list() for", "import re from flask import current_app, jsonify from flask import", "@admin_blu.route(\"/news_category\") def get_news_category(): # 获取所有的分类数据 categories = Category.query.all() # 定义列表保存分类数据", "news_dict_list.append(news.to_basic_dict()) data = { \"total_page\": total_page, \"current_page\": current_page, \"new_list\": news_dict_list,", "= User.query.filter(User.is_admin == False)\\ .order_by(User.last_login.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) users =", "= session.get(\"is_admin\", False) if not user_id or not is_admin: #", "else: # 拒绝通过,需要获取原因 reason = request.json.get(\"reason\") if not reason: return", "@admin_blu.route(\"/news_edit\", methods=[\"GET\", \"POST\"]) def 
news_edit(): \"\"\"返回新闻列表\"\"\" page = request.args.get(\"p\", \"1\")", "print(page) a = re.match(r\"^\\d*\", page) b = re.findall(r\"\"\"keywords=(\\w*)\"\"\", page) print(b)", "datetime, timedelta from . import admin_blu @admin_blu.route(\"/login\", methods=[\"GET\", \"POST\"]) def", "current_app.logger.error(e) return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\") if not category: return jsonify(errno=RET.NODATA, errmsg=\"未查询到分类信息\")", "import url_for import time from info import constants, db from", "category.name = category_name return jsonify(errno=RET.OK, errmsg=\"保存数据成功\") else: # 如果没有分类id, 添加分类", "db from info import redis_store from info.lib.yuntongxun.sms import CCP from", "session.get(\"is_admin\", False) if not user_id or not is_admin: # 判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页", "print(now_date) # 定义空数组,保存数据 active_date = list() active_count = list() #", "in news_list: news_dict_list.append(news.to_review_dict()) data = { \"total_page\": total_page, \"current_page\": current_page,", "news: return jsonify(errno=RET.NODATA, errmsg=\"未查询到数据\") if action == \"accept\": news.status =", "user.check_password(password): return render_template(\"admin/login.html\", errmsg=\"密码错误\") if not user.is_admin: return render_template(\"admin/login.html\", errmsg=\"用户不是管理员\")", "begin_date, User.last_login < end_date).count() print(count) except Exception as e: current_app.logger.error(e)", "total_page = 1 try: filters = list() # 如果有关键词 if", "# 如果有关键词 if keywords: # 添加关键词的检索选项 filters.append(News.title.contains(keywords)) # 查询 paginate", "as e: current_app.logger.error(e) return jsonify(errno=RET.THIRDERR, errmsg=\"上传图片错误\") news.index_image_url = constants.QINIU_DOMIN_PREFIX +", "in (\"accept\", \"reject\"): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news = None try:", "1 # 设置变量默认值 users = [] current_page = 1 total_page", "captcha from info.utils.image_storage import storage from info.utils.response_code import RET from", "= 
datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\") print(now_date) # 定义空数组,保存数据 active_date = list() active_count", "for category in categories: # 获取字典 cate_dict = category.to_dict() #", "int(page) except Exception as e: current_app.logger.error(e) page = 1 #", "categery_id]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") print(title, digest, content, categery_id) news =", "in categories: # 获取字典 cate_dict = category.to_dict() # 拼接内容 categories_dicts.append(cate_dict)", "from flask import make_response from flask import redirect from flask", "{ \"news\": news.to_dict() } return render_template(\"admin/news_review_detail.html\", data=data) # 执行审核操作 #", "1.1 判断数据是否有值: if not all([title, digest, content, categery_id]): return jsonify(errno=RET.PARAMERR,", "# 返回数据 data = { \"news\": news.to_dict() } return render_template(\"admin/news_review_detail.html\",", "news_dict_list, \"last_input\": b } if request.method == \"GET\": return render_template(\"admin/news_edit.html\",", "print(title, digest, content, categery_id) news = None try: news =", "storage from info.utils.response_code import RET from info.modules.passport import passport_blu from", "a = re.match(r\"^\\d*\", page) b = re.findall(r\"\"\"keywords=(\\w*)\"\"\", page) print(b) page", "\"accept\": news.status = 0 else: # 拒绝通过,需要获取原因 reason = request.json.get(\"reason\")", "list() # 如果有关键词 if keywords: # 添加关键词的检索选项 filters.append(News.title.contains(keywords)) # 查询", "try: index_image = index_image.read() except Exception as e: return jsonify(errno=RET.PARAMERR,", "news_dict_list = list() for news in news_list: news_dict_list.append(news.to_review_dict()) data =", "errmsg=\"参数错误\") news = None try: # 3. 
查询新闻 news =", "paginate.pages except Exception as e: current_app.error(e) news_dict_list = list() for", "errmsg=\"参数错误\") if action not in (\"accept\", \"reject\"): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")", "new_category.name = category_name db.session.add(new_category) db.session.commit() except Exception as e: current_app.logger.error(e)", "Category, News from info.modules.profile import profile_blu from info.utils.common import user_login_data", "timedelta from . import admin_blu @admin_blu.route(\"/login\", methods=[\"GET\", \"POST\"]) def admin_login():", "def user_list(): \"\"\"获取用户列表\"\"\" # 获取参数 page = request.args.get(\"p\", 1) try:", "page = request.args.get(\"p\", 1) try: print(page) page = int(page) except", "data=context) @admin_blu.route(\"/news_review\") def news_review(): \"\"\"返回待审核新闻列表\"\"\" page = request.args.get(\"p\", 1) keywords", "if b != []: b = b[0] keywords = b", "[] for category in categories: # 获取字典 cate_dict = category.to_dict()", "datetime.strptime(day_begin, \"%Y-%m-%d\") day_count = User.query.filter(User.is_admin==False, User.create_time >= day_begin_date).count() except Exception", "add_category(): \"\"\"修改或者添加分类\"\"\" category_id = request.json.get(\"id\") category_name = request.json.get(\"name\") print(category_name) if", "request.form.get(\"title\") digest= request.form.get(\"digest\") content = request.form.get(\"content\") index_image = request.form.get(\"index-image\") categery_id", "not reason: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news.reason = reason news.status =", "\"total_page\": total_page, \"current_page\": current_page, \"users\": users_list } return render_template(\"admin/user_list.html\", data=context)", "category in categories: # 获取字典 cate_dict = category.to_dict() # 拼接内容", "data = { \"news\": news.to_dict(), \"categories\": categories_li } return render_template(\"admin/news_edit_detail.html\",", "keywords: # 添加关键词的检索选项 filters.append(News.title.contains(keywords)) # 查询 paginate = 
News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\", "total_count = 0 try: total_count = User.query.filter(User.is_admin == False).count() except", "session[\"is_admin\"] = True # 跳转到后台管理主页,暂未实现 return redirect(url_for(\"admin.admin_index\")) @admin_blu.route(\"/index\") @user_login_data def", "\"current_page\": current_page, \"news_list\": news_dict_list } return render_template(\"admin/news_review.html\", data=data) @admin_blu.route(\"/news_review_detail\", methods=[\"GET\",", "categories = Category.query.all() # 定义列表保存分类数据 categories_dicts = [] for category", "return jsonify(errno=RET.OK, errmsg=\"OK\") return render_template(\"admin/news_edit.html\", data=data) @admin_blu.route(\"/news_edit_detail\", methods=[\"GET\", \"POST\"]) def", "in news_list: news_dict_list.append(news.to_basic_dict()) data = { \"total_page\": total_page, \"current_page\": current_page,", "Exception as e: current_app.logger.error(e) day_count = 0 try: day_begin =", "all([news_id, action]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") if action not in (\"accept\",", "Exception as e: current_app.logger.error(e) return jsonify(errno=RET.THIRDERR, errmsg=\"上传图片错误\") news.index_image_url = constants.QINIU_DOMIN_PREFIX", "render_template(\"admin/user_count.html\", data=data) @admin_blu.route(\"/user_list\") def user_list(): \"\"\"获取用户列表\"\"\" # 获取参数 page =", "查询图表信息 # 获取到当天00:00:00时间 now_date = datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\") print(now_date) # 定义空数组,保存数据", "request.args.get(\"news_id\") if not news_id: data = { \"errmsg\": \"未查询到数据\" }", "category.to_dict() # 拼接内容 categories_dicts.append(cate_dict) categories_dicts.pop(0) # 返回内容 data = {", "current_app.logger.error(e) # 将模型列表转换成字典列表 users_list = [] for user in users:", "= re.match(r\"^\\d*\", page) b = re.findall(r\"\"\"keywords=(\\w*)\"\"\", page) print(b) page =", "\"POST\"]) def news_edit_detail(): \"\"\"新闻编辑详情\"\"\" if request.method == \"GET\": # 获取参数", "time from info import 
constants, db from info import redis_store", "import passport_blu from info.models import User, Category, News from info.modules.profile", "= paginate.page total_page = paginate.pages except Exception as e: current_app.logger.error(e)", "== username).first() except Exception as e: current_app.logger.error(e) return render_template(\"admin/login.html\", errmsg=\"数据错误\")", "\"users\": users_list } return render_template(\"admin/user_list.html\", data=context) @admin_blu.route(\"/news_review\") def news_review(): \"\"\"返回待审核新闻列表\"\"\"", "< end_date).count() print(count) except Exception as e: current_app.logger.error(e) active_count.append(count) active_date.reverse()", "import random import re from flask import current_app, jsonify from", "\"active_date\": active_date, \"active_count\": active_count} return render_template(\"admin/user_count.html\", data=data) @admin_blu.route(\"/user_list\") def user_list():", "= 1 news_list = list() current_page = 1 total_page =", "not news_id: data = { \"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\",", "= b else: keywords = None b = \"\" try:", "+ key # 3. 
设置相关数据 news.title = title news.digest =", "= session.get(\"is_admin\", False) if user_id and is_admin: return redirect(url_for(\"admin_index\")) return", "categories_dicts.pop(0) # 返回内容 data = { \"categories\": categories_dicts } return", "= g.user return render_template(\"admin/index.html\", user=user.to_dict()) @admin_blu.before_request def before_request(): # 判断如果不是登陆页面的请求", "\"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) # 查询新闻 news =", "User.create_time > mon_begin_date).count() except Exception as e: current_app.logger.error(e) day_count =", "try: filters = list() # 如果有关键词 if keywords: # 添加关键词的检索选项", "active_count} return render_template(\"admin/user_count.html\", data=data) @admin_blu.route(\"/user_list\") def user_list(): \"\"\"获取用户列表\"\"\" # 获取参数", "return jsonify(errno=RET.NODATA, errmsg=\"未查询到分类信息\") category.name = category_name return jsonify(errno=RET.OK, errmsg=\"保存数据成功\") else:", "random import re from flask import current_app, jsonify from flask", "title news.digest = digest news.content = content news.category_id = categery_id", "return render_template(\"admin/login.html\", errmsg=\"用户名错误\") if not user.check_password(password): return render_template(\"admin/login.html\", errmsg=\"密码错误\") if", "list() for news in news_list: news_dict_list.append(news.to_basic_dict()) data = { \"total_page\":", "except Exception as e: current_app.logger.error(e) page = 1 # 设置变量默认值", "Exception as e: current_app.logger.error(e) # 查询月新增数 mon_count = 0 try:", "list() # 依次添加数据,再反转 for i in range(0, 31): begin_date =", "Exception as e: return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") # 2. 将标题图片上传到七牛 try:", "执行审核操作 # 1. 
获取参数 news_id = request.json.get(\"news_id\") action = request.json.get(\"action\")", "if not user.check_password(password): return render_template(\"admin/login.html\", errmsg=\"密码错误\") if not user.is_admin: return", "list() for news in news_list: news_dict_list.append(news.to_review_dict()) data = { \"total_page\":", "依次添加数据,再反转 for i in range(0, 31): begin_date = now_date -", "data=data) news_id = request.form.get(\"news_id\") title = request.form.get(\"title\") digest= request.form.get(\"digest\") content", "render_template(\"admin/login.html\", errmsg=\"用户名错误\") if not user.check_password(password): return render_template(\"admin/login.html\", errmsg=\"密码错误\") if not", "e: return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") # 2. 将标题图片上传到七牛 try: key =", "keywords = None b = \"\" try: page = int(page)", "= User.query.filter(User.is_admin == False, User.last_login >= begin_date, User.last_login < end_date).count()", "info.utils.captcha.captcha import captcha from info.utils.image_storage import storage from info.utils.response_code import", "== \"GET\": # 获取参数 news_id = request.args.get(\"news_id\") if not news_id:", "page) print(b) page = a.group() if b != []: b", "= { \"categories\": categories_dicts } return render_template(\"admin/news_type.html\", data=data) @admin_blu.route(\"/add_category\", methods=[\"POST\"])", "} return render_template(\"admin/user_list.html\", data=context) @admin_blu.route(\"/news_review\") def news_review(): \"\"\"返回待审核新闻列表\"\"\" page =", "if not news: return jsonify(errno=RET.NODATA, errmsg=\"未查询到数据\") if action == \"accept\":", "request from flask import session from flask import url_for import", "try: key = storage(index_image) except Exception as e: current_app.logger.error(e) return", "if category_id: try: category = Category.query.get(category_id) except Exception as e:", "category: return jsonify(errno=RET.NODATA, errmsg=\"未查询到分类信息\") category.name = category_name return jsonify(errno=RET.OK, errmsg=\"保存数据成功\")", "try: count = 
User.query.filter(User.is_admin == False, User.last_login >= begin_date, User.last_login", "request.json.get(\"reason\") if not reason: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news.reason = reason", "if not user_id or not is_admin: # 判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页 return redirect(\"/\")", "digest, content, categery_id) news = None try: news = News.query.get(news_id)", "filters.append(News.title.contains(keywords)) paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list =", "db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") return jsonify(errno=RET.OK, errmsg=\"操作成功\") @admin_blu.route(\"/news_edit\", methods=[\"GET\", \"POST\"])", "list() active_count = list() # 依次添加数据,再反转 for i in range(0,", "# 获取新闻id if request.method == \"GET\": news_id = request.args.get(\"news_id\") if", "News from info.modules.profile import profile_blu from info.utils.common import user_login_data from", "request.form.get(\"category_id\") # 1.1 判断数据是否有值: if not all([title, digest, content, categery_id]):", "user.is_admin: return render_template(\"admin/login.html\", errmsg=\"用户不是管理员\") session[\"user_id\"] = user.id session[\"nick_name\"] = user.nick_name", "0 try: count = User.query.filter(User.is_admin == False, User.last_login >= begin_date,", "flask import request from flask import session from flask import", "not all([title, digest, content, categery_id]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") print(title, digest,", "data = { \"news\": news.to_dict() } return render_template(\"admin/news_review_detail.html\", data=data) #", "render_template(\"admin/news_review_detail.html\", data=data) # 通过id查询新闻 news = None try: news =", "# 2. 
将标题图片上传到七牛 try: key = storage(index_image) except Exception as", "= { \"news\": news.to_dict() } return render_template(\"admin/news_review_detail.html\", data=data) # 执行审核操作", "= 1 total_page = 1 try: filters = [News.status !=", "Category.query.get(category_id) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\") if", "= int(page) except Exception as e: current_app.logger.error(e) page = 1", "now.tm_mon, now.tm_mday) day_begin_date = datetime.strptime(day_begin, \"%Y-%m-%d\") day_count = User.query.filter(User.is_admin==False, User.create_time", "errmsg=\"未查询到分类信息\") category.name = category_name return jsonify(errno=RET.OK, errmsg=\"保存数据成功\") else: # 如果没有分类id,", "request.args.get(\"p\", 1) keywords = request.args.get(\"keywords\", \"\") try: page = int(page)", "mon_count, \"day_count\": day_count, \"active_date\": active_date, \"active_count\": active_count} return render_template(\"admin/user_count.html\", data=data)", "= index_image.read() except Exception as e: return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") #", "% (now.tm_year, now.tm_mon) mon_begin_date = datetime.strptime(mon_begin, \"%Y-%m-%d\") mon_count = User.query.filter(User.is_admin==False,", "now = time.localtime() mon_begin = \"%d-%02d-01\" % (now.tm_year, now.tm_mon) mon_begin_date", "users = [] current_page = 1 total_page = 1 #查询数据", "return jsonify(errno=RET.OK, errmsg=\"操作成功\") @admin_blu.route(\"/news_edit\", methods=[\"GET\", \"POST\"]) def news_edit(): \"\"\"返回新闻列表\"\"\" page", "request.form.get(\"news_id\") title = request.form.get(\"title\") digest= request.form.get(\"digest\") content = request.form.get(\"content\") index_image", "if not news_id: data = { \"errmsg\": \"未查询到数据\" } return", "news.category_id = categery_id # 4. 
保存到数据库 try: db.session.commit() except Exception", "categery_id = request.form.get(\"category_id\") # 1.1 判断数据是否有值: if not all([title, digest,", "info.models import User, Category, News from info.modules.profile import profile_blu from", "{ \"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data) # 通过id查询新闻 news", "errmsg=\"参数有误\") print(title, digest, content, categery_id) news = None try: news", "else: keywords = None b = \"\" try: page =", "News.query.get(news_id) except Exception as e: current_app.logger.error(e) if not news: return", "\"POST\"]) def admin_login(): if request.method == \"GET\": # 去session 中取到指定的值", "user.mobile session[\"is_admin\"] = True # 跳转到后台管理主页,暂未实现 return redirect(url_for(\"admin.admin_index\")) @admin_blu.route(\"/index\") @user_login_data", "# 查询新闻 news = None try: news = News.query.get(news_id) except", "current_app.error(e) news_dict_list = list() for news in news_list: news_dict_list.append(news.to_review_dict()) data", "= None b = \"\" try: page = int(page) except", "{ \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) # 查询新闻 news", "e: current_app.logger.error(e) return jsonify(errno=RET.THIRDERR, errmsg=\"上传图片错误\") news.index_image_url = constants.QINIU_DOMIN_PREFIX + key", "-1 # 保存数据库 try: db.session.commit() except Exception as e: current_app.logger.error(e)", "# 添加关键字检索选项 filters.append(News.title.contains(keywords)) paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)", "= list() # 如果有关键词 if keywords: # 添加关键词的检索选项 filters.append(News.title.contains(keywords)) #", "request.form.get(\"index-image\") categery_id = request.form.get(\"category_id\") # 1.1 判断数据是否有值: if not all([title,", "now.tm_mday) day_begin_date = datetime.strptime(day_begin, \"%Y-%m-%d\") day_count = User.query.filter(User.is_admin==False, User.create_time >=", "查询月新增数 mon_count = 0 try: now = 
time.localtime() mon_begin =", "\"total_page\": total_page, \"current_page\": current_page, \"new_list\": news_dict_list, \"last_input\": b } if", "\"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data) # 通过id查询新闻 news = None", "end_date = now_date - timedelta(days=(i - 1)) active_date.append(begin_date.strftime(\"%Y-%m-%d\")) count =", "a.group() if b != []: b = b[0] keywords =", "as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") return jsonify(errno=RET.OK, errmsg=\"操作成功\")", "index_image.read() except Exception as e: return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") # 2.", "[] for category in categories: c_dict = category.to_dict() c_dict[\"is_selected\"] =", "request.form.get(\"content\") index_image = request.form.get(\"index-image\") categery_id = request.form.get(\"category_id\") # 1.1 判断数据是否有值:", "= now_date - timedelta(days=(i - 1)) active_date.append(begin_date.strftime(\"%Y-%m-%d\")) count = 0", "import current_app, jsonify from flask import g from flask import", "False) users = paginate.items current_page = paginate.page total_page = paginate.pages", "# 将模型列表转换成字典列表 users_list = [] for user in users: users_list.append(user.to_admin_dict())", "not all([news_id, action]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") if action not in", "re.findall(r\"\"\"keywords=(\\w*)\"\"\", page) print(b) page = a.group() if b != []:", "username = request.form.get(\"username\") password = request.form.get(\"password\") if not all([username, password]):", "total_page = paginate.pages except Exception as e: current_app.error(e) news_dict_list =", "request.form.get(\"username\") password = request.form.get(\"password\") if not all([username, password]): return render_template(\"admin/login.html\",", "if not category_name: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") # 判断是否有分类id if category_id:", "import profile_blu from info.utils.common import user_login_data from datetime 
import datetime,", "data=data) @admin_blu.route(\"/news_review_detail\", methods=[\"GET\", \"POST\"]) def news_review_detail(): \"\"\"新闻审核\"\"\" # 获取新闻id if", "{ \"news\": news.to_dict(), \"categories\": categories_li } return render_template(\"admin/news_edit_detail.html\", data=data) news_id", "返回结果 return jsonify(errno=RET.OK, errmsg=\"编辑成功\") @admin_blu.route(\"/news_category\") def get_news_category(): # 获取所有的分类数据 categories", "= \"%d-%02d-%02d\" % (now.tm_year, now.tm_mon, now.tm_mday) day_begin_date = datetime.strptime(day_begin, \"%Y-%m-%d\")", "categories_dicts.append(cate_dict) categories_dicts.pop(0) # 返回内容 data = { \"categories\": categories_dicts }", "{ \"categories\": categories_dicts } return render_template(\"admin/news_type.html\", data=data) @admin_blu.route(\"/add_category\", methods=[\"POST\"]) def", "render_template(\"admin/news_type.html\", data=data) @admin_blu.route(\"/add_category\", methods=[\"POST\"]) def add_category(): \"\"\"修改或者添加分类\"\"\" category_id = request.json.get(\"id\")", "jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") print(title, digest, content, categery_id) news = None try:", "如果有关键词 if keywords: # 添加关键字检索选项 filters.append(News.title.contains(keywords)) paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\", "if request.method == \"GET\": news_id = request.args.get(\"news_id\") if not news_id:", "\"GET\": return render_template(\"admin/news_edit.html\", data=data) # return jsonify(errno=RET.OK, errmsg=\"OK\") return render_template(\"admin/news_edit.html\",", "= category.to_dict() # 拼接内容 categories_dicts.append(cate_dict) categories_dicts.pop(0) # 返回内容 data =", "添加分类 try: new_category = Category() new_category.id = category_id new_category.name =", "for i in range(0, 31): begin_date = now_date - timedelta(days=i)", "except Exception as e: current_app.logger.error(e) # 查询月新增数 mon_count = 0", "if action not in (\"accept\", \"reject\"): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news", "如果有关键词 if 
keywords: # 添加关键词的检索选项 filters.append(News.title.contains(keywords)) # 查询 paginate =", "day_count, \"active_date\": active_date, \"active_count\": active_count} return render_template(\"admin/user_count.html\", data=data) @admin_blu.route(\"/user_list\") def", "b != []: b = b[0] keywords = b else:", "{ \"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data) # 返回数据 data", "render_template(\"admin/news_edit.html\", data=data) @admin_blu.route(\"/news_edit_detail\", methods=[\"GET\", \"POST\"]) def news_edit_detail(): \"\"\"新闻编辑详情\"\"\" if request.method", "render_template(\"admin/news_edit_detail.html\", data=data) news_id = request.form.get(\"news_id\") title = request.form.get(\"title\") digest= request.form.get(\"digest\")", "user_id = session.get(\"user_id\") is_admin = session.get(\"is_admin\", False) if not user_id", "except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.THIRDERR, errmsg=\"上传图片错误\") news.index_image_url =", "1 total_page = 1 try: filters = [News.status != 0]", "category = Category.query.get(category_id) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR,", ". 
import admin_blu @admin_blu.route(\"/login\", methods=[\"GET\", \"POST\"]) def admin_login(): if request.method", "拒绝通过,需要获取原因 reason = request.json.get(\"reason\") if not reason: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")", "request import random import re from flask import current_app, jsonify", "is_admin: return redirect(url_for(\"admin_index\")) return render_template(\"admin/login.html\") # 取到登陆的参数 username = request.form.get(\"username\")", "\"news\": news.to_dict() } return render_template(\"admin/news_review_detail.html\", data=data) # 执行审核操作 # 1.", "== False)\\ .order_by(User.last_login.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) users = paginate.items current_page", "return render_template(\"admin/user_list.html\", data=context) @admin_blu.route(\"/news_review\") def news_review(): \"\"\"返回待审核新闻列表\"\"\" page = request.args.get(\"p\",", "如果没有分类id, 添加分类 try: new_category = Category() new_category.id = category_id new_category.name", "key = storage(index_image) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.THIRDERR,", "= request.args.get(\"p\", 1) keywords = request.args.get(\"keywords\", \"\") try: page =", "= None try: news = News.query.get(news_id) except Exception as e:", "not user_id or not is_admin: # 判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页 return redirect(\"/\") @admin_blu.route(\"/user_count\")", "index_image: try: index_image = index_image.read() except Exception as e: return", "False).count() except Exception as e: current_app.logger.error(e) # 查询月新增数 mon_count =", "key # 3. 设置相关数据 news.title = title news.digest = digest", "render_template(\"admin/news_edit_detail.html\", data=data) categories = Category.query.all() categories_li = [] for category", "获取参数 news_id = request.json.get(\"news_id\") action = request.json.get(\"action\") #2. 判断参数 if", "from datetime import datetime, timedelta from . 
import admin_blu @admin_blu.route(\"/login\",", "user.id session[\"nick_name\"] = user.nick_name session[\"mobile\"] = user.mobile session[\"is_admin\"] = True", "except Exception as e: current_app.logger.error(e) if not news: return jsonify(errno=RET.NODATA,", "redirect(\"/\") @admin_blu.route(\"/user_count\") def user_count(): # 查询总人数 total_count = 0 try:", "= session.get(\"user_id\") is_admin = session.get(\"is_admin\", False) if not user_id or", "from info.utils.response_code import RET from info.modules.passport import passport_blu from info.models", "1. 获取参数 news_id = request.json.get(\"news_id\") action = request.json.get(\"action\") #2. 判断参数", "index_image = request.form.get(\"index-image\") categery_id = request.form.get(\"category_id\") # 1.1 判断数据是否有值: if", "not user.check_password(password): return render_template(\"admin/login.html\", errmsg=\"密码错误\") if not user.is_admin: return render_template(\"admin/login.html\",", "if not all([title, digest, content, categery_id]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\") print(title,", "return render_template(\"admin/news_edit_detail.html\", data=data) # 查询新闻 news = None try: news", "去session 中取到指定的值 user_id = session.get(\"user_id\", None) is_admin = session.get(\"is_admin\", False)", "<reponame>moonbria/test1 from flask import request import random import re from", "categories_li.pop(0) data = { \"news\": news.to_dict(), \"categories\": categories_li } return", "news.index_image_url = constants.QINIU_DOMIN_PREFIX + key # 3. 
设置相关数据 news.title =", "day_begin = \"%d-%02d-%02d\" % (now.tm_year, now.tm_mon, now.tm_mday) day_begin_date = datetime.strptime(day_begin,", "news_list: news_dict_list.append(news.to_basic_dict()) data = { \"total_page\": total_page, \"current_page\": current_page, \"new_list\":", "if not category: return jsonify(errno=RET.NODATA, errmsg=\"未查询到分类信息\") category.name = category_name return", "from flask import request from flask import session from flask", "True # 跳转到后台管理主页,暂未实现 return redirect(url_for(\"admin.admin_index\")) @admin_blu.route(\"/index\") @user_login_data def admin_index(): user", "\"active_count\": active_count} return render_template(\"admin/user_count.html\", data=data) @admin_blu.route(\"/user_list\") def user_list(): \"\"\"获取用户列表\"\"\" #", "action == \"accept\": news.status = 0 else: # 拒绝通过,需要获取原因 reason", "news_edit(): \"\"\"返回新闻列表\"\"\" page = request.args.get(\"p\", \"1\") print(page) a = re.match(r\"^\\d*\",", "判断数据是否有值: if not all([title, digest, content, categery_id]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数有误\")", "errmsg=\"操作成功\") @admin_blu.route(\"/news_edit\", methods=[\"GET\", \"POST\"]) def news_edit(): \"\"\"返回新闻列表\"\"\" page = request.args.get(\"p\",", "active_date.append(begin_date.strftime(\"%Y-%m-%d\")) count = 0 try: count = User.query.filter(User.is_admin == False,", "errmsg=\"密码错误\") if not user.is_admin: return render_template(\"admin/login.html\", errmsg=\"用户不是管理员\") session[\"user_id\"] = user.id", "import time from info import constants, db from info import", "设置变量默认值 users = [] current_page = 1 total_page = 1", "re from flask import current_app, jsonify from flask import g", "categories_li } return render_template(\"admin/news_edit_detail.html\", data=data) news_id = request.form.get(\"news_id\") title =", "not all([username, password]): return render_template(\"admin/login.html\", errmsg=\"参数错误\") try: user = User.query.filter(User.mobile", "except Exception as e: current_app.logger.error(e) news_dict_list = 
list() for news", "} return render_template(\"admin/news_edit_detail.html\", data=data) categories = Category.query.all() categories_li = []", "True categories_li.append(c_dict) # 移除最新分类 categories_li.pop(0) data = { \"news\": news.to_dict(),", "定义列表保存分类数据 categories_dicts = [] for category in categories: # 获取字典", "User.query.filter(User.mobile == username).first() except Exception as e: current_app.logger.error(e) return render_template(\"admin/login.html\",", "range(0, 31): begin_date = now_date - timedelta(days=i) end_date = now_date", "as e: current_app.logger.error(e) news_dict_list = list() for news in news_list:", "\"%d-%02d-%02d\" % (now.tm_year, now.tm_mon, now.tm_mday) day_begin_date = datetime.strptime(day_begin, \"%Y-%m-%d\") day_count", "== False).count() except Exception as e: current_app.logger.error(e) # 查询月新增数 mon_count", "c_dict[\"is_selected\"] = True categories_li.append(c_dict) # 移除最新分类 categories_li.pop(0) data = {", "b else: keywords = None b = \"\" try: page", "= { \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) categories =", "current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") # 5. 
返回结果 return jsonify(errno=RET.OK,", "render_template(\"admin/login.html\") # 取到登陆的参数 username = request.form.get(\"username\") password = request.form.get(\"password\") if", "user = g.user return render_template(\"admin/index.html\", user=user.to_dict()) @admin_blu.before_request def before_request(): #", "# 设置变量默认值 users = [] current_page = 1 total_page =", "= [] for category in categories: c_dict = category.to_dict() c_dict[\"is_selected\"]", "b = \"\" try: page = int(page) except Exception as", "session.get(\"user_id\") is_admin = session.get(\"is_admin\", False) if not user_id or not", "\"%Y-%m-%d\") day_count = User.query.filter(User.is_admin==False, User.create_time >= day_begin_date).count() except Exception as", "categories = Category.query.all() categories_li = [] for category in categories:", "page = request.args.get(\"p\", 1) keywords = request.args.get(\"keywords\", \"\") try: page", "尝试读取图片 if index_image: try: index_image = index_image.read() except Exception as", "[]: b = b[0] keywords = b else: keywords =", "= False if category.id == News.category_id: c_dict[\"is_selected\"] = True categories_li.append(c_dict)", "# 拒绝通过,需要获取原因 reason = request.json.get(\"reason\") if not reason: return jsonify(errno=RET.PARAMERR,", "users_list = [] for user in users: users_list.append(user.to_admin_dict()) context =", "try: page = int(page) except Exception as e: current_app.logger.error(e) page", "except Exception as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") return", "def news_edit_detail(): \"\"\"新闻编辑详情\"\"\" if request.method == \"GET\": # 获取参数 news_id", "redirect(url_for(\"admin_index\")) return render_template(\"admin/login.html\") # 取到登陆的参数 username = request.form.get(\"username\") password =", "import admin_blu @admin_blu.route(\"/login\", methods=[\"GET\", \"POST\"]) def admin_login(): if request.method ==", "def admin_login(): if request.method == \"GET\": # 去session 中取到指定的值 user_id", "not 
is_admin: # 判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页 return redirect(\"/\") @admin_blu.route(\"/user_count\") def user_count(): #", "except Exception as e: current_app.logger.error(e) # 查询图表信息 # 获取到当天00:00:00时间 now_date", "flask import session from flask import url_for import time from", "session[\"user_id\"] = user.id session[\"nick_name\"] = user.nick_name session[\"mobile\"] = user.mobile session[\"is_admin\"]", "将标题图片上传到七牛 try: key = storage(index_image) except Exception as e: current_app.logger.error(e)", "b } if request.method == \"GET\": return render_template(\"admin/news_edit.html\", data=data) #", "news_id = request.json.get(\"news_id\") action = request.json.get(\"action\") #2. 判断参数 if not", "try: now = time.localtime() mon_begin = \"%d-%02d-01\" % (now.tm_year, now.tm_mon)", "try: paginate = User.query.filter(User.is_admin == False)\\ .order_by(User.last_login.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)", ">= day_begin_date).count() except Exception as e: current_app.logger.error(e) # 查询图表信息 #", "as e: current_app.logger.error(e) day_count = 0 try: day_begin = \"%d-%02d-%02d\"", "import request from flask import session from flask import url_for", "None) is_admin = session.get(\"is_admin\", False) if user_id and is_admin: return", "} return render_template(\"admin/news_type.html\", data=data) @admin_blu.route(\"/add_category\", methods=[\"POST\"]) def add_category(): \"\"\"修改或者添加分类\"\"\" category_id", "is_admin = session.get(\"is_admin\", False) if user_id and is_admin: return redirect(url_for(\"admin_index\"))", "None try: news = News.query.get(news_id) except Exception as e: current_app.logger.error(e)", "跳转到后台管理主页,暂未实现 return redirect(url_for(\"admin.admin_index\")) @admin_blu.route(\"/index\") @user_login_data def admin_index(): user = g.user", "print(b) page = a.group() if b != []: b =", "== \"accept\": news.status = 0 else: # 拒绝通过,需要获取原因 reason =", "from info.modules.profile import profile_blu from info.utils.common import 
user_login_data from datetime", "not request.url.endswith(url_for(\"admin.admin_login\")): user_id = session.get(\"user_id\") is_admin = session.get(\"is_admin\", False) if", "False)\\ .order_by(User.last_login.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) users = paginate.items current_page =", "data=data) # 查询新闻 news = None try: news = News.query.get(news_id)", "将模型列表转换成字典列表 users_list = [] for user in users: users_list.append(user.to_admin_dict()) context", "\"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) categories = Category.query.all() categories_li", "request.args.get(\"p\", \"1\") print(page) a = re.match(r\"^\\d*\", page) b = re.findall(r\"\"\"keywords=(\\w*)\"\"\",", "1) try: print(page) page = int(page) except Exception as e:", "\"news\": news.to_dict(), \"categories\": categories_li } return render_template(\"admin/news_edit_detail.html\", data=data) news_id =", "news in news_list: news_dict_list.append(news.to_basic_dict()) data = { \"total_page\": total_page, \"current_page\":", ".paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list = paginate.items current_page = paginate.page total_page", "now_date - timedelta(days=(i - 1)) active_date.append(begin_date.strftime(\"%Y-%m-%d\")) count = 0 try:", "判断如果不是登陆页面的请求 if not request.url.endswith(url_for(\"admin.admin_login\")): user_id = session.get(\"user_id\") is_admin = session.get(\"is_admin\",", "0 try: day_begin = \"%d-%02d-%02d\" % (now.tm_year, now.tm_mon, now.tm_mday) day_begin_date", "31): begin_date = now_date - timedelta(days=i) end_date = now_date -", "= paginate.pages except Exception as e: current_app.error(e) news_dict_list = list()", "from flask import url_for import time from info import constants,", "as e: current_app.logger.error(e) if not news: data = { \"errmsg\":", "return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") # 判断是否有分类id if category_id: try: category =", "# 获取到当天00:00:00时间 now_date = 
datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\") print(now_date) # 定义空数组,保存数据 active_date", "render_template(\"admin/login.html\", errmsg=\"密码错误\") if not user.is_admin: return render_template(\"admin/login.html\", errmsg=\"用户不是管理员\") session[\"user_id\"] =", "not news: data = { \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\",", "jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\") if not category: return jsonify(errno=RET.NODATA, errmsg=\"未查询到分类信息\") category.name =", "users: users_list.append(user.to_admin_dict()) context = { \"total_page\": total_page, \"current_page\": current_page, \"users\":", "\"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data) # 返回数据 data = {", "User.last_login < end_date).count() print(count) except Exception as e: current_app.logger.error(e) active_count.append(count)", "not news_id: data = { \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\",", "e: current_app.logger.error(e) # 查询图表信息 # 获取到当天00:00:00时间 now_date = datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\")", "if not news: data = { \"errmsg\": \"未查询到数据\" } return", "# 定义空数组,保存数据 active_date = list() active_count = list() # 依次添加数据,再反转", "username).first() except Exception as e: current_app.logger.error(e) return render_template(\"admin/login.html\", errmsg=\"数据错误\") if", "if request.method == \"GET\": # 获取参数 news_id = request.args.get(\"news_id\") if", "in categories: c_dict = category.to_dict() c_dict[\"is_selected\"] = False if category.id", "flask import url_for import time from info import constants, db", "return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\") if not category: return jsonify(errno=RET.NODATA, errmsg=\"未查询到分类信息\") category.name", "jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") # 5. 
返回结果 return jsonify(errno=RET.OK, errmsg=\"编辑成功\") @admin_blu.route(\"/news_category\") def", "current_page = 1 total_page = 1 #查询数据 try: paginate =", "now.tm_mon) mon_begin_date = datetime.strptime(mon_begin, \"%Y-%m-%d\") mon_count = User.query.filter(User.is_admin==False, User.create_time >", "jsonify(errno=RET.OK, errmsg=\"编辑成功\") @admin_blu.route(\"/news_category\") def get_news_category(): # 获取所有的分类数据 categories = Category.query.all()", "category_name db.session.add(new_category) db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback() return", "jsonify(errno=RET.OK, errmsg=\"保存数据成功\") else: # 如果没有分类id, 添加分类 try: new_category = Category()", "render_template(\"admin/news_review.html\", data=data) @admin_blu.route(\"/news_review_detail\", methods=[\"GET\", \"POST\"]) def news_review_detail(): \"\"\"新闻审核\"\"\" # 获取新闻id", "as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") # 5. 返回结果", "try: total_count = User.query.filter(User.is_admin == False).count() except Exception as e:", "or not is_admin: # 判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页 return redirect(\"/\") @admin_blu.route(\"/user_count\") def user_count():", "current_app.logger.error(e) page = 1 news_list = list() current_page = 1", "make_response from flask import redirect from flask import render_template from", "\"GET\": # 获取参数 news_id = request.args.get(\"news_id\") if not news_id: data", "page) b = re.findall(r\"\"\"keywords=(\\w*)\"\"\", page) print(b) page = a.group() if", "in users: users_list.append(user.to_admin_dict()) context = { \"total_page\": total_page, \"current_page\": current_page,", "password = request.form.get(\"password\") if not all([username, password]): return render_template(\"admin/login.html\", errmsg=\"参数错误\")", "news_dict_list } return render_template(\"admin/news_review.html\", data=data) @admin_blu.route(\"/news_review_detail\", methods=[\"GET\", \"POST\"]) def news_review_detail():", 
"request.json.get(\"id\") category_name = request.json.get(\"name\") print(category_name) if not category_name: return jsonify(errno=RET.PARAMERR,", "keywords = request.args.get(\"keywords\", \"\") try: page = int(page) except Exception", "\"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data) # 通过id查询新闻 news =", "not in (\"accept\", \"reject\"): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news = None", "import datetime, timedelta from . import admin_blu @admin_blu.route(\"/login\", methods=[\"GET\", \"POST\"])", "\"GET\": # 去session 中取到指定的值 user_id = session.get(\"user_id\", None) is_admin =", "return render_template(\"admin/index.html\", user=user.to_dict()) @admin_blu.before_request def before_request(): # 判断如果不是登陆页面的请求 if not", "is_admin: # 判断当前是否有用户登陆,或者是否是管理员,如果不是,直接重定向到项目首页 return redirect(\"/\") @admin_blu.route(\"/user_count\") def user_count(): # 查询总人数", "# 查询月新增数 mon_count = 0 try: now = time.localtime() mon_begin", "def admin_index(): user = g.user return render_template(\"admin/index.html\", user=user.to_dict()) @admin_blu.before_request def", "return redirect(url_for(\"admin_index\")) return render_template(\"admin/login.html\") # 取到登陆的参数 username = request.form.get(\"username\") password", "= now_date - timedelta(days=i) end_date = now_date - timedelta(days=(i -", "news: data = { \"errmsg\": \"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data)", "cate_dict = category.to_dict() # 拼接内容 categories_dicts.append(cate_dict) categories_dicts.pop(0) # 返回内容 data", "return render_template(\"admin/login.html\", errmsg=\"密码错误\") if not user.is_admin: return render_template(\"admin/login.html\", errmsg=\"用户不是管理员\") session[\"user_id\"]", "Exception as e: current_app.logger.error(e) return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\") if not category:", "= reason news.status = -1 # 保存数据库 try: db.session.commit() except", "= [] for user in users: users_list.append(user.to_admin_dict()) context 
= {", "= News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list = paginate.items current_page", "news = None try: news = News.query.get(news_id) except Exception as", "try: db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR,", "request.args.get(\"keywords\", \"\") try: page = int(page) except Exception as e:", "= 0 try: count = User.query.filter(User.is_admin == False, User.last_login >=", "total_page, \"current_page\": current_page, \"new_list\": news_dict_list, \"last_input\": b } if request.method", "as e: current_app.logger.error(e) # 查询图表信息 # 获取到当天00:00:00时间 now_date = datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"),", "info.lib.yuntongxun.sms import CCP from info.utils.captcha.captcha import captcha from info.utils.image_storage import", "page = 1 # 设置变量默认值 users = [] current_page =", "user_count(): # 查询总人数 total_count = 0 try: total_count = User.query.filter(User.is_admin", "users_list } return render_template(\"admin/user_list.html\", data=context) @admin_blu.route(\"/news_review\") def news_review(): \"\"\"返回待审核新闻列表\"\"\" page", "# 1.2 尝试读取图片 if index_image: try: index_image = index_image.read() except", "= User.query.filter(User.is_admin==False, User.create_time >= day_begin_date).count() except Exception as e: current_app.logger.error(e)", "try: # 3. 查询新闻 news = News.query.get(news_id) except Exception as", "5. 
返回结果 return jsonify(errno=RET.OK, errmsg=\"编辑成功\") @admin_blu.route(\"/news_category\") def get_news_category(): # 获取所有的分类数据", "Exception as e: current_app.logger.error(e) if not news: return jsonify(errno=RET.NODATA, errmsg=\"未找到新闻数据\")", "@admin_blu.before_request def before_request(): # 判断如果不是登陆页面的请求 if not request.url.endswith(url_for(\"admin.admin_login\")): user_id =", "== \"GET\": return render_template(\"admin/news_edit.html\", data=data) # return jsonify(errno=RET.OK, errmsg=\"OK\") return", "return jsonify(errno=RET.THIRDERR, errmsg=\"上传图片错误\") news.index_image_url = constants.QINIU_DOMIN_PREFIX + key # 3.", "from flask import redirect from flask import render_template from flask", "total_page = 1 try: filters = [News.status != 0] #", "redirect(url_for(\"admin.admin_index\")) @admin_blu.route(\"/index\") @user_login_data def admin_index(): user = g.user return render_template(\"admin/index.html\",", "# 获取所有的分类数据 categories = Category.query.all() # 定义列表保存分类数据 categories_dicts = []", "if not news: return jsonify(errno=RET.NODATA, errmsg=\"未找到新闻数据\") # 1.2 尝试读取图片 if", "= paginate.pages except Exception as e: current_app.logger.error(e) # 将模型列表转换成字典列表 users_list", "flask import current_app, jsonify from flask import g from flask", "user in users: users_list.append(user.to_admin_dict()) context = { \"total_page\": total_page, \"current_page\":", "as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") return jsonify(errno=RET.OK, errmsg=\"保存数据成功\")", "is_admin = session.get(\"is_admin\", False) if not user_id or not is_admin:", "News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list = paginate.items current_page =", "now_date = datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\") print(now_date) # 定义空数组,保存数据 active_date = list()", "if category.id == News.category_id: c_dict[\"is_selected\"] = True 
categories_li.append(c_dict) # 移除最新分类", "from info.utils.captcha.captcha import captcha from info.utils.image_storage import storage from info.utils.response_code", "jsonify(errno=RET.NODATA, errmsg=\"未查询到分类信息\") category.name = category_name return jsonify(errno=RET.OK, errmsg=\"保存数据成功\") else: #", "Exception as e: current_app.logger.error(e) page = 1 news_list = list()", "errmsg=\"保存数据失败\") # 5. 返回结果 return jsonify(errno=RET.OK, errmsg=\"编辑成功\") @admin_blu.route(\"/news_category\") def get_news_category():", "as e: current_app.error(e) news_dict_list = list() for news in news_list:", "not news: return jsonify(errno=RET.NODATA, errmsg=\"未查询到数据\") if action == \"accept\": news.status", "= request.json.get(\"action\") #2. 判断参数 if not all([news_id, action]): return jsonify(errno=RET.PARAMERR,", "mon_count = 0 try: now = time.localtime() mon_begin = \"%d-%02d-01\"", "= datetime.strptime(mon_begin, \"%Y-%m-%d\") mon_count = User.query.filter(User.is_admin==False, User.create_time > mon_begin_date).count() except", "\"1\") print(page) a = re.match(r\"^\\d*\", page) b = re.findall(r\"\"\"keywords=(\\w*)\"\"\", page)", "begin_date = now_date - timedelta(days=i) end_date = now_date - timedelta(days=(i", "errmsg=\"未查询到数据\") if action == \"accept\": news.status = 0 else: #", "as e: current_app.logger.error(e) page = 1 # 设置变量默认值 users =", "constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) users = paginate.items current_page = paginate.page total_page =", "current_app.logger.error(e) # 查询月新增数 mon_count = 0 try: now = time.localtime()", "context = { \"total_page\": total_page, \"current_page\": current_page, \"users\": users_list }", "not category: return jsonify(errno=RET.NODATA, errmsg=\"未查询到分类信息\") category.name = category_name return jsonify(errno=RET.OK,", "# 4. 
保存到数据库 try: db.session.commit() except Exception as e: current_app.logger.error(e)", "g from flask import make_response from flask import redirect from", "admin_login(): if request.method == \"GET\": # 去session 中取到指定的值 user_id =", "= request.args.get(\"p\", 1) try: print(page) page = int(page) except Exception", "data = { \"total_page\": total_page, \"current_page\": current_page, \"new_list\": news_dict_list, \"last_input\":", "constants.QINIU_DOMIN_PREFIX + key # 3. 设置相关数据 news.title = title news.digest", "获取所有的分类数据 categories = Category.query.all() # 定义列表保存分类数据 categories_dicts = [] for", "\"\"\"修改或者添加分类\"\"\" category_id = request.json.get(\"id\") category_name = request.json.get(\"name\") print(category_name) if not", "category_id = request.json.get(\"id\") category_name = request.json.get(\"name\") print(category_name) if not category_name:", "e: current_app.logger.error(e) return render_template(\"admin/login.html\", errmsg=\"数据错误\") if not user: return render_template(\"admin/login.html\",", "1)) active_date.append(begin_date.strftime(\"%Y-%m-%d\")) count = 0 try: count = User.query.filter(User.is_admin ==", "if not user.is_admin: return render_template(\"admin/login.html\", errmsg=\"用户不是管理员\") session[\"user_id\"] = user.id session[\"nick_name\"]", "active_count.reverse() data = {\"total_count\": total_count, \"mon_count\": mon_count, \"day_count\": day_count, \"active_date\":", "page = 1 news_list = list() current_page = 1 total_page", "from info.utils.image_storage import storage from info.utils.response_code import RET from info.modules.passport", "except Exception as e: current_app.logger.error(e) page = 1 news_list =", "获取到当天00:00:00时间 now_date = datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\") print(now_date) # 定义空数组,保存数据 active_date =", ".order_by(User.last_login.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) users = paginate.items current_page = paginate.page", "render_template(\"admin/login.html\", 
errmsg=\"参数错误\") try: user = User.query.filter(User.mobile == username).first() except Exception", "return render_template(\"admin/news_review_detail.html\", data=data) # 通过id查询新闻 news = None try: news", "data = { \"total_page\": total_page, \"current_page\": current_page, \"news_list\": news_dict_list }", "user: return render_template(\"admin/login.html\", errmsg=\"用户名错误\") if not user.check_password(password): return render_template(\"admin/login.html\", errmsg=\"密码错误\")", "# 1. 获取参数 news_id = request.json.get(\"news_id\") action = request.json.get(\"action\") #2.", "redis_store from info.lib.yuntongxun.sms import CCP from info.utils.captcha.captcha import captcha from", "0 try: now = time.localtime() mon_begin = \"%d-%02d-01\" % (now.tm_year,", "{ \"total_page\": total_page, \"current_page\": current_page, \"new_list\": news_dict_list, \"last_input\": b }", "news_review_detail(): \"\"\"新闻审核\"\"\" # 获取新闻id if request.method == \"GET\": news_id =", "def user_count(): # 查询总人数 total_count = 0 try: total_count =", "\"\"\"新闻审核\"\"\" # 获取新闻id if request.method == \"GET\": news_id = request.args.get(\"news_id\")", "\"GET\": news_id = request.args.get(\"news_id\") if not news_id: data = {", "= request.json.get(\"news_id\") action = request.json.get(\"action\") #2. 
判断参数 if not all([news_id,", "@user_login_data def admin_index(): user = g.user return render_template(\"admin/index.html\", user=user.to_dict()) @admin_blu.before_request", "= user.mobile session[\"is_admin\"] = True # 跳转到后台管理主页,暂未实现 return redirect(url_for(\"admin.admin_index\")) @admin_blu.route(\"/index\")", "category.to_dict() c_dict[\"is_selected\"] = False if category.id == News.category_id: c_dict[\"is_selected\"] =", "= request.args.get(\"p\", \"1\") print(page) a = re.match(r\"^\\d*\", page) b =", "False, User.last_login >= begin_date, User.last_login < end_date).count() print(count) except Exception", "constants, db from info import redis_store from info.lib.yuntongxun.sms import CCP", "user_id and is_admin: return redirect(url_for(\"admin_index\")) return render_template(\"admin/login.html\") # 取到登陆的参数 username", "= user.id session[\"nick_name\"] = user.nick_name session[\"mobile\"] = user.mobile session[\"is_admin\"] =", "current_app.logger.error(e) # 查询图表信息 # 获取到当天00:00:00时间 now_date = datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\") print(now_date)", "users = paginate.items current_page = paginate.page total_page = paginate.pages except", "!= []: b = b[0] keywords = b else: keywords", "= paginate.pages except Exception as e: current_app.logger.error(e) news_dict_list = list()", "mon_begin = \"%d-%02d-01\" % (now.tm_year, now.tm_mon) mon_begin_date = datetime.strptime(mon_begin, \"%Y-%m-%d\")", "e: current_app.logger.error(e) if not news: return jsonify(errno=RET.NODATA, errmsg=\"未查询到数据\") if action", "= list() for news in news_list: news_dict_list.append(news.to_basic_dict()) data = {", "return render_template(\"admin/news_edit_detail.html\", data=data) categories = Category.query.all() categories_li = [] for", "category_name return jsonify(errno=RET.OK, errmsg=\"保存数据成功\") else: # 如果没有分类id, 添加分类 try: new_category", "user.nick_name session[\"mobile\"] = user.mobile session[\"is_admin\"] = True # 跳转到后台管理主页,暂未实现 return", 
"jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") if action not in (\"accept\", \"reject\"): return jsonify(errno=RET.PARAMERR,", "return jsonify(errno=RET.NODATA, errmsg=\"未查询到数据\") if action == \"accept\": news.status = 0", "from flask import render_template from flask import request from flask", "jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news = None try: # 3. 查询新闻 news", "News.query.get(news_id) except Exception as e: current_app.logger.error(e) if not news: data", "@admin_blu.route(\"/news_edit_detail\", methods=[\"GET\", \"POST\"]) def news_edit_detail(): \"\"\"新闻编辑详情\"\"\" if request.method == \"GET\":", "c_dict[\"is_selected\"] = False if category.id == News.category_id: c_dict[\"is_selected\"] = True", "2. 将标题图片上传到七牛 try: key = storage(index_image) except Exception as e:", "render_template from flask import request from flask import session from", "try: news = News.query.get(news_id) except Exception as e: current_app.logger.error(e) if", "4. 保存到数据库 try: db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback()", "= request.json.get(\"reason\") if not reason: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news.reason =", "current_page = paginate.page total_page = paginate.pages except Exception as e:", "news_id: data = { \"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data)", "filters = list() # 如果有关键词 if keywords: # 添加关键词的检索选项 filters.append(News.title.contains(keywords))", "info import redis_store from info.lib.yuntongxun.sms import CCP from info.utils.captcha.captcha import", "# 1.1 判断数据是否有值: if not all([title, digest, content, categery_id]): return", "= { \"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data) # 返回数据", "request.args.get(\"news_id\") if not news_id: data = { \"errmsg\": \"没有找到新闻\" }", "errmsg=\"编辑成功\") @admin_blu.route(\"/news_category\") def get_news_category(): # 获取所有的分类数据 categories = Category.query.all() #", "as e: 
current_app.logger.error(e) # 查询月新增数 mon_count = 0 try: now", "e: current_app.error(e) news_dict_list = list() for news in news_list: news_dict_list.append(news.to_review_dict())", "\"current_page\": current_page, \"new_list\": news_dict_list, \"last_input\": b } if request.method ==", "request.method == \"GET\": news_id = request.args.get(\"news_id\") if not news_id: data", "import make_response from flask import redirect from flask import render_template", "total_page = paginate.pages except Exception as e: current_app.logger.error(e) # 将模型列表转换成字典列表", "as e: current_app.logger.error(e) if not news: return jsonify(errno=RET.NODATA, errmsg=\"未找到新闻数据\") #", "} return render_template(\"admin/news_edit_detail.html\", data=data) # 查询新闻 news = None try:", "查询 paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list =", "if request.method == \"GET\": return render_template(\"admin/news_edit.html\", data=data) # return jsonify(errno=RET.OK,", "CCP from info.utils.captcha.captcha import captcha from info.utils.image_storage import storage from", "(now.tm_year, now.tm_mon, now.tm_mday) day_begin_date = datetime.strptime(day_begin, \"%Y-%m-%d\") day_count = User.query.filter(User.is_admin==False,", "} return render_template(\"admin/news_review_detail.html\", data=data) # 执行审核操作 # 1. 获取参数 news_id", "= title news.digest = digest news.content = content news.category_id =", "判断是否有分类id if category_id: try: category = Category.query.get(category_id) except Exception as", "jsonify(errno=RET.THIRDERR, errmsg=\"上传图片错误\") news.index_image_url = constants.QINIU_DOMIN_PREFIX + key # 3. 
设置相关数据", "try: new_category = Category() new_category.id = category_id new_category.name = category_name", "current_app.logger.error(e) return render_template(\"admin/login.html\", errmsg=\"数据错误\") if not user: return render_template(\"admin/login.html\", errmsg=\"用户名错误\")", "获取字典 cate_dict = category.to_dict() # 拼接内容 categories_dicts.append(cate_dict) categories_dicts.pop(0) # 返回内容", "users_list.append(user.to_admin_dict()) context = { \"total_page\": total_page, \"current_page\": current_page, \"users\": users_list", "datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\") print(now_date) # 定义空数组,保存数据 active_date = list() active_count =", "} return render_template(\"admin/news_review.html\", data=data) @admin_blu.route(\"/news_review_detail\", methods=[\"GET\", \"POST\"]) def news_review_detail(): \"\"\"新闻审核\"\"\"", "import captcha from info.utils.image_storage import storage from info.utils.response_code import RET", "= user.nick_name session[\"mobile\"] = user.mobile session[\"is_admin\"] = True # 跳转到后台管理主页,暂未实现", "errmsg=\"上传图片错误\") news.index_image_url = constants.QINIU_DOMIN_PREFIX + key # 3. 设置相关数据 news.title", "e: current_app.logger.error(e) return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\") if not category: return jsonify(errno=RET.NODATA,", "(\"accept\", \"reject\"): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news = None try: #", "# 执行审核操作 # 1. 获取参数 news_id = request.json.get(\"news_id\") action =", "= News.query.get(news_id) except Exception as e: current_app.logger.error(e) if not news:", "#2. 
判断参数 if not all([news_id, action]): return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") if", "# return jsonify(errno=RET.OK, errmsg=\"OK\") return render_template(\"admin/news_edit.html\", data=data) @admin_blu.route(\"/news_edit_detail\", methods=[\"GET\", \"POST\"])", "category_id new_category.name = category_name db.session.add(new_category) db.session.commit() except Exception as e:", "0 else: # 拒绝通过,需要获取原因 reason = request.json.get(\"reason\") if not reason:", "= True categories_li.append(c_dict) # 移除最新分类 categories_li.pop(0) data = { \"news\":", "current_page = 1 total_page = 1 try: filters = [News.status", "reason = request.json.get(\"reason\") if not reason: return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\") news.reason", "= datetime.strptime(day_begin, \"%Y-%m-%d\") day_count = User.query.filter(User.is_admin==False, User.create_time >= day_begin_date).count() except", "= paginate.items current_page = paginate.page total_page = paginate.pages except Exception", "} if request.method == \"GET\": return render_template(\"admin/news_edit.html\", data=data) # return", "Category() new_category.id = category_id new_category.name = category_name db.session.add(new_category) db.session.commit() except", "errmsg=\"参数有误\") # 2. 
将标题图片上传到七牛 try: key = storage(index_image) except Exception", "db.session.add(new_category) db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR,", "from info.utils.common import user_login_data from datetime import datetime, timedelta from", "news = News.query.get(news_id) except Exception as e: current_app.logger.error(e) if not", "= {\"total_count\": total_count, \"mon_count\": mon_count, \"day_count\": day_count, \"active_date\": active_date, \"active_count\":", "def news_edit(): \"\"\"返回新闻列表\"\"\" page = request.args.get(\"p\", \"1\") print(page) a =", "News.category_id: c_dict[\"is_selected\"] = True categories_li.append(c_dict) # 移除最新分类 categories_li.pop(0) data =", "news.status = 0 else: # 拒绝通过,需要获取原因 reason = request.json.get(\"reason\") if", "\"day_count\": day_count, \"active_date\": active_date, \"active_count\": active_count} return render_template(\"admin/user_count.html\", data=data) @admin_blu.route(\"/user_list\")", "\"\" try: page = int(page) except Exception as e: current_app.logger.error(e)", "# 移除最新分类 categories_li.pop(0) data = { \"news\": news.to_dict(), \"categories\": categories_li", "添加关键字检索选项 filters.append(News.title.contains(keywords)) paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) news_list", "e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") # 5. 返回结果 return", "= request.form.get(\"content\") index_image = request.form.get(\"index-image\") categery_id = request.form.get(\"category_id\") # 1.1", "return render_template(\"admin/news_edit_detail.html\", data=data) news_id = request.form.get(\"news_id\") title = request.form.get(\"title\") digest=", "categery_id # 4. 
保存到数据库 try: db.session.commit() except Exception as e:", "= request.args.get(\"news_id\") if not news_id: data = { \"errmsg\": \"未查询到数据\"", "data = { \"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\", data=data) #", "keywords: # 添加关键字检索选项 filters.append(News.title.contains(keywords)) paginate = News.query.filter(*filters)\\ .order_by(News.create_time.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT,", "= 1 # 设置变量默认值 users = [] current_page = 1", "storage(index_image) except Exception as e: current_app.logger.error(e) return jsonify(errno=RET.THIRDERR, errmsg=\"上传图片错误\") news.index_image_url", "import CCP from info.utils.captcha.captcha import captcha from info.utils.image_storage import storage", "request.url.endswith(url_for(\"admin.admin_login\")): user_id = session.get(\"user_id\") is_admin = session.get(\"is_admin\", False) if not", "methods=[\"POST\"]) def add_category(): \"\"\"修改或者添加分类\"\"\" category_id = request.json.get(\"id\") category_name = request.json.get(\"name\")", "查询总人数 total_count = 0 try: total_count = User.query.filter(User.is_admin == False).count()", "profile_blu from info.utils.common import user_login_data from datetime import datetime, timedelta", "移除最新分类 categories_li.pop(0) data = { \"news\": news.to_dict(), \"categories\": categories_li }", "news_edit_detail(): \"\"\"新闻编辑详情\"\"\" if request.method == \"GET\": # 获取参数 news_id =", "total_count = User.query.filter(User.is_admin == False).count() except Exception as e: current_app.logger.error(e)", "errmsg=\"参数错误\") try: user = User.query.filter(User.mobile == username).first() except Exception as", "paginate = User.query.filter(User.is_admin == False)\\ .order_by(User.last_login.desc())\\ .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) users", "as e: current_app.logger.error(e) if not news: return jsonify(errno=RET.NODATA, errmsg=\"未查询到数据\") if", "if request.method == \"GET\": # 去session 中取到指定的值 user_id = session.get(\"user_id\",", "# 
查询图表信息 # 获取到当天00:00:00时间 now_date = datetime.strptime(datetime.now().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\") print(now_date) #", "b[0] keywords = b else: keywords = None b =", "paginate.pages except Exception as e: current_app.logger.error(e) # 将模型列表转换成字典列表 users_list =", "保存数据库 try: db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback() return", "return jsonify(errno=RET.OK, errmsg=\"保存数据成功\") else: # 如果没有分类id, 添加分类 try: new_category =", "Exception as e: current_app.logger.error(e) page = 1 # 设置变量默认值 users", "中取到指定的值 user_id = session.get(\"user_id\", None) is_admin = session.get(\"is_admin\", False) if", "User, Category, News from info.modules.profile import profile_blu from info.utils.common import", "User.query.filter(User.is_admin==False, User.create_time >= day_begin_date).count() except Exception as e: current_app.logger.error(e) #", "errmsg=\"保存数据成功\") else: # 如果没有分类id, 添加分类 try: new_category = Category() new_category.id", "as e: current_app.logger.error(e) return render_template(\"admin/login.html\", errmsg=\"数据错误\") if not user: return", "return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\") return jsonify(errno=RET.OK, errmsg=\"操作成功\") @admin_blu.route(\"/news_edit\", methods=[\"GET\", \"POST\"]) def", ".paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False) users = paginate.items current_page = paginate.page total_page", "b = b[0] keywords = b else: keywords = None", "\"POST\"]) def news_review_detail(): \"\"\"新闻审核\"\"\" # 获取新闻id if request.method == \"GET\":", "return render_template(\"admin/news_edit.html\", data=data) # return jsonify(errno=RET.OK, errmsg=\"OK\") return render_template(\"admin/news_edit.html\", data=data)", "for category in categories: c_dict = category.to_dict() c_dict[\"is_selected\"] = False", "!= 0] # 如果有关键词 if keywords: # 添加关键字检索选项 filters.append(News.title.contains(keywords)) paginate", "return render_template(\"admin/login.html\", errmsg=\"用户不是管理员\") session[\"user_id\"] = user.id 
session[\"nick_name\"] = user.nick_name session[\"mobile\"]", "- timedelta(days=i) end_date = now_date - timedelta(days=(i - 1)) active_date.append(begin_date.strftime(\"%Y-%m-%d\"))", "admin_blu @admin_blu.route(\"/login\", methods=[\"GET\", \"POST\"]) def admin_login(): if request.method == \"GET\":", "\"\"\"返回新闻列表\"\"\" page = request.args.get(\"p\", \"1\") print(page) a = re.match(r\"^\\d*\", page)", "= request.json.get(\"id\") category_name = request.json.get(\"name\") print(category_name) if not category_name: return", "except Exception as e: current_app.logger.error(e) day_count = 0 try: day_begin", "# 保存数据库 try: db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback()", "User.create_time >= day_begin_date).count() except Exception as e: current_app.logger.error(e) # 查询图表信息", "3. 设置相关数据 news.title = title news.digest = digest news.content =", "= categery_id # 4. 保存到数据库 try: db.session.commit() except Exception as", "page = request.args.get(\"p\", \"1\") print(page) a = re.match(r\"^\\d*\", page) b", "for user in users: users_list.append(user.to_admin_dict()) context = { \"total_page\": total_page,", "b = re.findall(r\"\"\"keywords=(\\w*)\"\"\", page) print(b) page = a.group() if b", "\"\"\"新闻编辑详情\"\"\" if request.method == \"GET\": # 获取参数 news_id = request.args.get(\"news_id\")", "categories: c_dict = category.to_dict() c_dict[\"is_selected\"] = False if category.id ==", "[] current_page = 1 total_page = 1 #查询数据 try: paginate", "not news: data = { \"errmsg\": \"未查询到数据\" } return render_template(\"admin/news_review_detail.html\",", "\"没有找到新闻\" } return render_template(\"admin/news_edit_detail.html\", data=data) # 查询新闻 news = None", "data=data) @admin_blu.route(\"/add_category\", methods=[\"POST\"]) def add_category(): \"\"\"修改或者添加分类\"\"\" category_id = request.json.get(\"id\") category_name" ]
[ "3 anslist = ans_vec() gate_model = load_model() test_title_feature = np.load('data/vectorized/Test_title.npy')", "from keras.models import * from keras.models import Model from keras.preprocessing", "data + '.csv') lines = csv.reader(f) for line in lines:", "fp.write('indiatimes\\n') #Low frequency words are replaced with \"indiatimes\" else: for", "elif an == 0 and anext != 0: fp.write(ind_a[anext]) else:", "tokenizer_a = text.Tokenizer(num_words=dic+1) tokenizer_a.fit_on_texts(anslist) dic_a = tokenizer_a.word_index ind_a ={value:key for", "!= 0 and anext == 0: fp.write(ind_a[an]) elif an ==", "weights into new model gate_model.load_weights('models/MODEL.h5', by_name=True) return gate_model train_ans, anslist", "load json and create model json_file = open('models/MODEL.json', 'r') loaded_model_json", "test_title_feature, test_summary_feature]) fp = open('reports/Test.ans', 'w') for h in range(num_test):", "elif an != 0 and anext == 0: fp.write(ind_a[an]) elif", "import os import numpy as np from keras.models import *", "are replaced with \"indiatimes\" else: for j in range(dic): an", "= tokenizer_a.word_index ind_a ={value:key for key, value in dic_a.items()} num_test", "= model_from_json(loaded_model_json) # load weights into new model gate_model.load_weights('models/MODEL.h5', by_name=True)", "else: if an != 0: fp.write(ind_a[an] + '\\n') else: fp.write('\\n')", "os import numpy as np from keras.models import * from", "ind_a ={value:key for key, value in dic_a.items()} num_test = len(open('data/raw/Test.csv',", "duplicate words else: fp.write(ind_a[an] + ' ') elif an !=", "fp.write('\\n') fp.close() def main(): load_model() print('\\n\\nGenerating answers...') if os.path.exists('reports') ==", "= open('data/raw/' + data + '.csv') lines = csv.reader(f) for", "0: #Words before and after if an == anext: fp.write('')", "gate_model = model_from_json(loaded_model_json) # load weights into new model gate_model.load_weights('models/MODEL.h5',", "in range(dic): an = 
np.argmax(ans[i][j],axis=0) if j != dic-1: anext", "words are replaced with \"indiatimes\" else: for j in range(dic):", "'\\n') else: fp.write('\\n') fp.close() def main(): load_model() print('\\n\\nGenerating answers...') if", "test_title_feature = np.load('data/vectorized/Test_title.npy') test_summary_feature = np.load('data/vectorized/Test_summary.npy') tokenizer_a = text.Tokenizer(num_words=dic+1) tokenizer_a.fit_on_texts(anslist)", "def ans_vec(): anslist = [] dataset = ['Train'] for data", "ans_vec(): anslist = [] dataset = ['Train'] for data in", "csv.reader(f) for line in lines: source_uri = line[4] anslist.append(source_uri) f.close()", "if an == anext: fp.write('') #Delete duplicate words else: fp.write(ind_a[an]", "if an != 0 and anext != 0: #Words before", "#Delete duplicate words else: fp.write(ind_a[an] + ' ') elif an", "= np.argmax(ans[i][j+1],axis=0) if an != 0 and anext != 0:", "fp.write('') #Delete duplicate words else: fp.write(ind_a[an] + ' ') elif", "+ '.csv') lines = csv.reader(f) for line in lines: source_uri", "an == 0 and anext != 0: fp.write(ind_a[anext]) else: fp.write('')", "== 0: fp.write('indiatimes\\n') #Low frequency words are replaced with \"indiatimes\"", "an != 0 and anext == 0: fp.write(ind_a[an]) elif an", "keras.models import * from keras.models import Model from keras.preprocessing import", "in range(num_test): i = h if np.argmax(ans[i][0],axis=0) == 0: fp.write('indiatimes\\n')", "'r') loaded_model_json = json_file.read() json_file.close() gate_model = model_from_json(loaded_model_json) # load", "anslist = ans_vec() gate_model = load_model() test_title_feature = np.load('data/vectorized/Test_title.npy') test_summary_feature", "gate_model = load_model() test_title_feature = np.load('data/vectorized/Test_title.npy') test_summary_feature = np.load('data/vectorized/Test_summary.npy') tokenizer_a", "h in range(num_test): i = h if np.argmax(ans[i][0],axis=0) == 0:", "= len(open('data/raw/Test.csv', 'r').readlines()) ans = 
gate_model.predict([ test_title_feature, test_summary_feature]) fp =", "fp.write('') else: if an != 0: fp.write(ind_a[an] + '\\n') else:", "= h if np.argmax(ans[i][0],axis=0) == 0: fp.write('indiatimes\\n') #Low frequency words", "generate_save_ans(): dic = 3 anslist = ans_vec() gate_model = load_model()", "else: fp.write('\\n') fp.close() def main(): load_model() print('\\n\\nGenerating answers...') if os.path.exists('reports')", "False: os.mkdir('reports') if os.path.isfile('reports/Test.ans') == False: generate_save_ans() print('\\nAnswer generation complete...\\n\\n')", "0 and anext != 0: fp.write(ind_a[anext]) else: fp.write('') else: if", "fp.close() def main(): load_model() print('\\n\\nGenerating answers...') if os.path.exists('reports') == False:", "an != 0 and anext != 0: #Words before and", "fp.write(ind_a[anext]) else: fp.write('') else: if an != 0: fp.write(ind_a[an] +", "fp.write(ind_a[an] + '\\n') else: fp.write('\\n') fp.close() def main(): load_model() print('\\n\\nGenerating", "from keras.preprocessing import text def load_model(): print('\\nLoading model...') # load", "text def load_model(): print('\\nLoading model...') # load json and create", "in lines: source_uri = line[4] anslist.append(source_uri) f.close() return anslist def", "= load_model() test_title_feature = np.load('data/vectorized/Test_title.npy') test_summary_feature = np.load('data/vectorized/Test_summary.npy') tokenizer_a =", "import Model from keras.preprocessing import text def load_model(): print('\\nLoading model...')", "0 and anext == 0: fp.write(ind_a[an]) elif an == 0", "with \"indiatimes\" else: for j in range(dic): an = np.argmax(ans[i][j],axis=0)", "for j in range(dic): an = np.argmax(ans[i][j],axis=0) if j !=", "after if an == anext: fp.write('') #Delete duplicate words else:", "[] def ans_vec(): anslist = [] dataset = ['Train'] for", "csv import os import numpy as np from keras.models import", "model gate_model.load_weights('models/MODEL.h5', by_name=True) return 
gate_model train_ans, anslist = [], []", "anslist = [] dataset = ['Train'] for data in dataset:", "+ ' ') elif an != 0 and anext ==", "= json_file.read() json_file.close() gate_model = model_from_json(loaded_model_json) # load weights into", "frequency words are replaced with \"indiatimes\" else: for j in", "dic = 3 anslist = ans_vec() gate_model = load_model() test_title_feature", "fp.write(ind_a[an] + ' ') elif an != 0 and anext", "answers...') if os.path.exists('reports') == False: os.mkdir('reports') if os.path.isfile('reports/Test.ans') == False:", "return gate_model train_ans, anslist = [], [] def ans_vec(): anslist", "= ['Train'] for data in dataset: f = open('data/raw/' +", "j in range(dic): an = np.argmax(ans[i][j],axis=0) if j != dic-1:", "load weights into new model gate_model.load_weights('models/MODEL.h5', by_name=True) return gate_model train_ans,", "!= 0 and anext != 0: #Words before and after", "for line in lines: source_uri = line[4] anslist.append(source_uri) f.close() return", "anext == 0: fp.write(ind_a[an]) elif an == 0 and anext", "if os.path.isfile('reports/Test.ans') == False: generate_save_ans() print('\\nAnswer generation complete...\\n\\n') if __name__", "== 0 and anext != 0: fp.write(ind_a[anext]) else: fp.write('') else:", "dataset: f = open('data/raw/' + data + '.csv') lines =", "else: fp.write(ind_a[an] + ' ') elif an != 0 and", "and create model json_file = open('models/MODEL.json', 'r') loaded_model_json = json_file.read()", "test_summary_feature]) fp = open('reports/Test.ans', 'w') for h in range(num_test): i", "== False: generate_save_ans() print('\\nAnswer generation complete...\\n\\n') if __name__ == \"__main__\":", "os.mkdir('reports') if os.path.isfile('reports/Test.ans') == False: generate_save_ans() print('\\nAnswer generation complete...\\n\\n') if", "gate_model.predict([ test_title_feature, test_summary_feature]) fp = open('reports/Test.ans', 'w') for h in", "len(open('data/raw/Test.csv', 'r').readlines()) ans = 
gate_model.predict([ test_title_feature, test_summary_feature]) fp = open('reports/Test.ans',", "import csv import os import numpy as np from keras.models", "import text def load_model(): print('\\nLoading model...') # load json and", "print('\\n\\nGenerating answers...') if os.path.exists('reports') == False: os.mkdir('reports') if os.path.isfile('reports/Test.ans') ==", "json_file = open('models/MODEL.json', 'r') loaded_model_json = json_file.read() json_file.close() gate_model =", "0: fp.write('indiatimes\\n') #Low frequency words are replaced with \"indiatimes\" else:", "if an != 0: fp.write(ind_a[an] + '\\n') else: fp.write('\\n') fp.close()", "gate_model.load_weights('models/MODEL.h5', by_name=True) return gate_model train_ans, anslist = [], [] def", "= [] dataset = ['Train'] for data in dataset: f", "Generation import csv import os import numpy as np from", "Model from keras.preprocessing import text def load_model(): print('\\nLoading model...') #", "for key, value in dic_a.items()} num_test = len(open('data/raw/Test.csv', 'r').readlines()) ans", "0 and anext != 0: #Words before and after if", "replaced with \"indiatimes\" else: for j in range(dic): an =", "!= 0: fp.write(ind_a[an] + '\\n') else: fp.write('\\n') fp.close() def main():", "line[4] anslist.append(source_uri) f.close() return anslist def generate_save_ans(): dic = 3", "= ans_vec() gate_model = load_model() test_title_feature = np.load('data/vectorized/Test_title.npy') test_summary_feature =", "load_model() test_title_feature = np.load('data/vectorized/Test_title.npy') test_summary_feature = np.load('data/vectorized/Test_summary.npy') tokenizer_a = text.Tokenizer(num_words=dic+1)", "j != dic-1: anext = np.argmax(ans[i][j+1],axis=0) if an != 0", "return anslist def generate_save_ans(): dic = 3 anslist = ans_vec()", "\"indiatimes\" else: for j in range(dic): an = np.argmax(ans[i][j],axis=0) if", "== False: os.mkdir('reports') if os.path.isfile('reports/Test.ans') == False: generate_save_ans() 
print('\\nAnswer generation", "anslist = [], [] def ans_vec(): anslist = [] dataset", "test_summary_feature = np.load('data/vectorized/Test_summary.npy') tokenizer_a = text.Tokenizer(num_words=dic+1) tokenizer_a.fit_on_texts(anslist) dic_a = tokenizer_a.word_index", "= open('models/MODEL.json', 'r') loaded_model_json = json_file.read() json_file.close() gate_model = model_from_json(loaded_model_json)", "* from keras.models import Model from keras.preprocessing import text def", "= np.load('data/vectorized/Test_title.npy') test_summary_feature = np.load('data/vectorized/Test_summary.npy') tokenizer_a = text.Tokenizer(num_words=dic+1) tokenizer_a.fit_on_texts(anslist) dic_a", "[], [] def ans_vec(): anslist = [] dataset = ['Train']", "range(dic): an = np.argmax(ans[i][j],axis=0) if j != dic-1: anext =", "else: for j in range(dic): an = np.argmax(ans[i][j],axis=0) if j", "np.load('data/vectorized/Test_summary.npy') tokenizer_a = text.Tokenizer(num_words=dic+1) tokenizer_a.fit_on_texts(anslist) dic_a = tokenizer_a.word_index ind_a ={value:key", "== 0: fp.write(ind_a[an]) elif an == 0 and anext !=", "ans = gate_model.predict([ test_title_feature, test_summary_feature]) fp = open('reports/Test.ans', 'w') for", "before and after if an == anext: fp.write('') #Delete duplicate", "keras.preprocessing import text def load_model(): print('\\nLoading model...') # load json", "= open('reports/Test.ans', 'w') for h in range(num_test): i = h", "load_model() print('\\n\\nGenerating answers...') if os.path.exists('reports') == False: os.mkdir('reports') if os.path.isfile('reports/Test.ans')", "') elif an != 0 and anext == 0: fp.write(ind_a[an])", "dic_a = tokenizer_a.word_index ind_a ={value:key for key, value in dic_a.items()}", "fp.write(ind_a[an]) elif an == 0 and anext != 0: fp.write(ind_a[anext])", "print('\\nLoading model...') # load json and create model json_file =", "h if np.argmax(ans[i][0],axis=0) == 0: fp.write('indiatimes\\n') #Low frequency words are", 
"'r').readlines()) ans = gate_model.predict([ test_title_feature, test_summary_feature]) fp = open('reports/Test.ans', 'w')", "an = np.argmax(ans[i][j],axis=0) if j != dic-1: anext = np.argmax(ans[i][j+1],axis=0)", "an == anext: fp.write('') #Delete duplicate words else: fp.write(ind_a[an] +", "words else: fp.write(ind_a[an] + ' ') elif an != 0", "in dic_a.items()} num_test = len(open('data/raw/Test.csv', 'r').readlines()) ans = gate_model.predict([ test_title_feature,", "def load_model(): print('\\nLoading model...') # load json and create model", "data in dataset: f = open('data/raw/' + data + '.csv')", "anext != 0: #Words before and after if an ==", "fp = open('reports/Test.ans', 'w') for h in range(num_test): i =", "i = h if np.argmax(ans[i][0],axis=0) == 0: fp.write('indiatimes\\n') #Low frequency", "for h in range(num_test): i = h if np.argmax(ans[i][0],axis=0) ==", "anext != 0: fp.write(ind_a[anext]) else: fp.write('') else: if an !=", "source_uri = line[4] anslist.append(source_uri) f.close() return anslist def generate_save_ans(): dic", "numpy as np from keras.models import * from keras.models import", "loaded_model_json = json_file.read() json_file.close() gate_model = model_from_json(loaded_model_json) # load weights", "#Words before and after if an == anext: fp.write('') #Delete", "json and create model json_file = open('models/MODEL.json', 'r') loaded_model_json =", "from keras.models import Model from keras.preprocessing import text def load_model():", "and after if an == anext: fp.write('') #Delete duplicate words", "else: fp.write('') else: if an != 0: fp.write(ind_a[an] + '\\n')", "model json_file = open('models/MODEL.json', 'r') loaded_model_json = json_file.read() json_file.close() gate_model", "np.argmax(ans[i][0],axis=0) == 0: fp.write('indiatimes\\n') #Low frequency words are replaced with", "lines: source_uri = line[4] anslist.append(source_uri) f.close() return anslist def generate_save_ans():", "num_test = len(open('data/raw/Test.csv', 
'r').readlines()) ans = gate_model.predict([ test_title_feature, test_summary_feature]) fp", "into new model gate_model.load_weights('models/MODEL.h5', by_name=True) return gate_model train_ans, anslist =", "text.Tokenizer(num_words=dic+1) tokenizer_a.fit_on_texts(anslist) dic_a = tokenizer_a.word_index ind_a ={value:key for key, value", "os.path.exists('reports') == False: os.mkdir('reports') if os.path.isfile('reports/Test.ans') == False: generate_save_ans() print('\\nAnswer", "anslist def generate_save_ans(): dic = 3 anslist = ans_vec() gate_model", "'w') for h in range(num_test): i = h if np.argmax(ans[i][0],axis=0)", "new model gate_model.load_weights('models/MODEL.h5', by_name=True) return gate_model train_ans, anslist = [],", "json_file.close() gate_model = model_from_json(loaded_model_json) # load weights into new model", "open('models/MODEL.json', 'r') loaded_model_json = json_file.read() json_file.close() gate_model = model_from_json(loaded_model_json) #", "value in dic_a.items()} num_test = len(open('data/raw/Test.csv', 'r').readlines()) ans = gate_model.predict([", "!= 0: fp.write(ind_a[anext]) else: fp.write('') else: if an != 0:", "dataset = ['Train'] for data in dataset: f = open('data/raw/'", "by_name=True) return gate_model train_ans, anslist = [], [] def ans_vec():", "open('reports/Test.ans', 'w') for h in range(num_test): i = h if", "f.close() return anslist def generate_save_ans(): dic = 3 anslist =", "f = open('data/raw/' + data + '.csv') lines = csv.reader(f)", "line in lines: source_uri = line[4] anslist.append(source_uri) f.close() return anslist", "+ data + '.csv') lines = csv.reader(f) for line in", "={value:key for key, value in dic_a.items()} num_test = len(open('data/raw/Test.csv', 'r').readlines())", "False: generate_save_ans() print('\\nAnswer generation complete...\\n\\n') if __name__ == \"__main__\": main()", "and anext != 0: #Words before and after if an", "in dataset: f = open('data/raw/' + data + '.csv') lines", "if 
os.path.exists('reports') == False: os.mkdir('reports') if os.path.isfile('reports/Test.ans') == False: generate_save_ans()", "= gate_model.predict([ test_title_feature, test_summary_feature]) fp = open('reports/Test.ans', 'w') for h", "and anext != 0: fp.write(ind_a[anext]) else: fp.write('') else: if an", "0: fp.write(ind_a[an] + '\\n') else: fp.write('\\n') fp.close() def main(): load_model()", "def main(): load_model() print('\\n\\nGenerating answers...') if os.path.exists('reports') == False: os.mkdir('reports')", "['Train'] for data in dataset: f = open('data/raw/' + data", "anslist.append(source_uri) f.close() return anslist def generate_save_ans(): dic = 3 anslist", "ans_vec() gate_model = load_model() test_title_feature = np.load('data/vectorized/Test_title.npy') test_summary_feature = np.load('data/vectorized/Test_summary.npy')", "np.load('data/vectorized/Test_title.npy') test_summary_feature = np.load('data/vectorized/Test_summary.npy') tokenizer_a = text.Tokenizer(num_words=dic+1) tokenizer_a.fit_on_texts(anslist) dic_a =", "#Low frequency words are replaced with \"indiatimes\" else: for j", "#Answer Generation import csv import os import numpy as np", "as np from keras.models import * from keras.models import Model", "= csv.reader(f) for line in lines: source_uri = line[4] anslist.append(source_uri)", "== anext: fp.write('') #Delete duplicate words else: fp.write(ind_a[an] + '", "' ') elif an != 0 and anext == 0:", "0: fp.write(ind_a[an]) elif an == 0 and anext != 0:", "model_from_json(loaded_model_json) # load weights into new model gate_model.load_weights('models/MODEL.h5', by_name=True) return", "# load weights into new model gate_model.load_weights('models/MODEL.h5', by_name=True) return gate_model", "lines = csv.reader(f) for line in lines: source_uri = line[4]", "np.argmax(ans[i][j],axis=0) if j != dic-1: anext = np.argmax(ans[i][j+1],axis=0) if an", "json_file.read() json_file.close() gate_model = model_from_json(loaded_model_json) # load 
weights into new", "and anext == 0: fp.write(ind_a[an]) elif an == 0 and", "model...') # load json and create model json_file = open('models/MODEL.json',", "train_ans, anslist = [], [] def ans_vec(): anslist = []", "'.csv') lines = csv.reader(f) for line in lines: source_uri =", "anext = np.argmax(ans[i][j+1],axis=0) if an != 0 and anext !=", "an != 0: fp.write(ind_a[an] + '\\n') else: fp.write('\\n') fp.close() def", "tokenizer_a.fit_on_texts(anslist) dic_a = tokenizer_a.word_index ind_a ={value:key for key, value in", "tokenizer_a.word_index ind_a ={value:key for key, value in dic_a.items()} num_test =", "= line[4] anslist.append(source_uri) f.close() return anslist def generate_save_ans(): dic =", "= np.argmax(ans[i][j],axis=0) if j != dic-1: anext = np.argmax(ans[i][j+1],axis=0) if", "import * from keras.models import Model from keras.preprocessing import text", "!= dic-1: anext = np.argmax(ans[i][j+1],axis=0) if an != 0 and", "load_model(): print('\\nLoading model...') # load json and create model json_file", "def generate_save_ans(): dic = 3 anslist = ans_vec() gate_model =", "+ '\\n') else: fp.write('\\n') fp.close() def main(): load_model() print('\\n\\nGenerating answers...')", "= 3 anslist = ans_vec() gate_model = load_model() test_title_feature =", "dic-1: anext = np.argmax(ans[i][j+1],axis=0) if an != 0 and anext", "= text.Tokenizer(num_words=dic+1) tokenizer_a.fit_on_texts(anslist) dic_a = tokenizer_a.word_index ind_a ={value:key for key,", "key, value in dic_a.items()} num_test = len(open('data/raw/Test.csv', 'r').readlines()) ans =", "for data in dataset: f = open('data/raw/' + data +", "range(num_test): i = h if np.argmax(ans[i][0],axis=0) == 0: fp.write('indiatimes\\n') #Low", "# load json and create model json_file = open('models/MODEL.json', 'r')", "if j != dic-1: anext = np.argmax(ans[i][j+1],axis=0) if an !=", "os.path.isfile('reports/Test.ans') == False: generate_save_ans() print('\\nAnswer generation complete...\\n\\n') if __name__ ==", "[] 
dataset = ['Train'] for data in dataset: f =", "keras.models import Model from keras.preprocessing import text def load_model(): print('\\nLoading", "= [], [] def ans_vec(): anslist = [] dataset =", "gate_model train_ans, anslist = [], [] def ans_vec(): anslist =", "dic_a.items()} num_test = len(open('data/raw/Test.csv', 'r').readlines()) ans = gate_model.predict([ test_title_feature, test_summary_feature])", "import numpy as np from keras.models import * from keras.models", "if np.argmax(ans[i][0],axis=0) == 0: fp.write('indiatimes\\n') #Low frequency words are replaced", "0: fp.write(ind_a[anext]) else: fp.write('') else: if an != 0: fp.write(ind_a[an]", "!= 0: #Words before and after if an == anext:", "= np.load('data/vectorized/Test_summary.npy') tokenizer_a = text.Tokenizer(num_words=dic+1) tokenizer_a.fit_on_texts(anslist) dic_a = tokenizer_a.word_index ind_a", "open('data/raw/' + data + '.csv') lines = csv.reader(f) for line", "create model json_file = open('models/MODEL.json', 'r') loaded_model_json = json_file.read() json_file.close()", "anext: fp.write('') #Delete duplicate words else: fp.write(ind_a[an] + ' ')", "main(): load_model() print('\\n\\nGenerating answers...') if os.path.exists('reports') == False: os.mkdir('reports') if", "np.argmax(ans[i][j+1],axis=0) if an != 0 and anext != 0: #Words", "np from keras.models import * from keras.models import Model from" ]
[ "#在自己電腦測試 serverip='127.0.0.1' serverport=8888 client=socket(AF_INET,SOCK_STREAM) client.connect((serverip,serverport)) address_file = open('tools/address.txt', 'r') address", "socket import * def client(): #實驗室電腦 # serverip='192.168.3.11' # serverport=8887", "= address_file.read() client.send(address.encode()) print(client.recv(1024).decode()) if __name__=='__main__': client() # buffer='POST /post", "if __name__=='__main__': client() # buffer='POST /post HTTP/1.1\\r\\n' # buffer+='Content-Type:application/json\\r\\n' #", "buffer+='Content-Type:application/json\\r\\n' # buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016 Chao,He-Teng\\\\\"}\\r\\n' # buffer+='Address : ' + address", "import * def client(): #實驗室電腦 # serverip='192.168.3.11' # serverport=8887 #在自己電腦測試", "serverport=8888 client=socket(AF_INET,SOCK_STREAM) client.connect((serverip,serverport)) address_file = open('tools/address.txt', 'r') address = address_file.read()", "buffer='POST /post HTTP/1.1\\r\\n' # buffer+='Content-Type:application/json\\r\\n' # buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016 Chao,He-Teng\\\\\"}\\r\\n' # buffer+='Address", "client() # buffer='POST /post HTTP/1.1\\r\\n' # buffer+='Content-Type:application/json\\r\\n' # buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016 Chao,He-Teng\\\\\"}\\r\\n'", "# serverport=8887 #在自己電腦測試 serverip='127.0.0.1' serverport=8888 client=socket(AF_INET,SOCK_STREAM) client.connect((serverip,serverport)) address_file = open('tools/address.txt',", "+ address + '\\r\\n' # buffer+='\\r\\n' # print(buffer) # message", "= open('tools/address.txt', 'r') address = address_file.read() client.send(address.encode()) print(client.recv(1024).decode()) if __name__=='__main__':", "'r') address = address_file.read() client.send(address.encode()) print(client.recv(1024).decode()) if __name__=='__main__': client() #", "#實驗室電腦 # serverip='192.168.3.11' # serverport=8887 #在自己電腦測試 serverip='127.0.0.1' serverport=8888 client=socket(AF_INET,SOCK_STREAM) 
client.connect((serverip,serverport))", ": ' + address + '\\r\\n' # buffer+='\\r\\n' # print(buffer)", "# buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016 Chao,He-Teng\\\\\"}\\r\\n' # buffer+='Address : ' + address +", "open('tools/address.txt', 'r') address = address_file.read() client.send(address.encode()) print(client.recv(1024).decode()) if __name__=='__main__': client()", "# buffer+='Content-Type:application/json\\r\\n' # buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016 Chao,He-Teng\\\\\"}\\r\\n' # buffer+='Address : ' +", "def client(): #實驗室電腦 # serverip='192.168.3.11' # serverport=8887 #在自己電腦測試 serverip='127.0.0.1' serverport=8888", "client=socket(AF_INET,SOCK_STREAM) client.connect((serverip,serverport)) address_file = open('tools/address.txt', 'r') address = address_file.read() client.send(address.encode())", "# buffer+='Address : ' + address + '\\r\\n' # buffer+='\\r\\n'", "buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016 Chao,He-Teng\\\\\"}\\r\\n' # buffer+='Address : ' + address + '\\r\\n'", "Chao,He-Teng\\\\\"}\\r\\n' # buffer+='Address : ' + address + '\\r\\n' #", "buffer+='Address : ' + address + '\\r\\n' # buffer+='\\r\\n' #", "+ '\\r\\n' # buffer+='\\r\\n' # print(buffer) # message = \"國立台北大學世界第一:)\"", "# serverip='192.168.3.11' # serverport=8887 #在自己電腦測試 serverip='127.0.0.1' serverport=8888 client=socket(AF_INET,SOCK_STREAM) client.connect((serverip,serverport)) address_file", "address = address_file.read() client.send(address.encode()) print(client.recv(1024).decode()) if __name__=='__main__': client() # buffer='POST", "print(client.recv(1024).decode()) if __name__=='__main__': client() # buffer='POST /post HTTP/1.1\\r\\n' # buffer+='Content-Type:application/json\\r\\n'", "address_file = open('tools/address.txt', 'r') address = address_file.read() client.send(address.encode()) print(client.recv(1024).decode()) if", "address_file.read() client.send(address.encode()) print(client.recv(1024).decode()) if __name__=='__main__': client() # 
buffer='POST /post HTTP/1.1\\r\\n'", "client(): #實驗室電腦 # serverip='192.168.3.11' # serverport=8887 #在自己電腦測試 serverip='127.0.0.1' serverport=8888 client=socket(AF_INET,SOCK_STREAM)", "address + '\\r\\n' # buffer+='\\r\\n' # print(buffer) # message =", "/post HTTP/1.1\\r\\n' # buffer+='Content-Type:application/json\\r\\n' # buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016 Chao,He-Teng\\\\\"}\\r\\n' # buffer+='Address :", "# buffer='POST /post HTTP/1.1\\r\\n' # buffer+='Content-Type:application/json\\r\\n' # buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016 Chao,He-Teng\\\\\"}\\r\\n' #", "serverport=8887 #在自己電腦測試 serverip='127.0.0.1' serverport=8888 client=socket(AF_INET,SOCK_STREAM) client.connect((serverip,serverport)) address_file = open('tools/address.txt', 'r')", "from socket import * def client(): #實驗室電腦 # serverip='192.168.3.11' #", "-*- from socket import * def client(): #實驗室電腦 # serverip='192.168.3.11'", "client.connect((serverip,serverport)) address_file = open('tools/address.txt', 'r') address = address_file.read() client.send(address.encode()) print(client.recv(1024).decode())", "HTTP/1.1\\r\\n' # buffer+='Content-Type:application/json\\r\\n' # buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016 Chao,He-Teng\\\\\"}\\r\\n' # buffer+='Address : '", "coding: UTF-8 -*- from socket import * def client(): #實驗室電腦", "UTF-8 -*- from socket import * def client(): #實驗室電腦 #", "serverip='127.0.0.1' serverport=8888 client=socket(AF_INET,SOCK_STREAM) client.connect((serverip,serverport)) address_file = open('tools/address.txt', 'r') address =", "-*- coding: UTF-8 -*- from socket import * def client():", "# -*- coding: UTF-8 -*- from socket import * def", "' + address + '\\r\\n' # buffer+='\\r\\n' # print(buffer) #", "* def client(): #實驗室電腦 # serverip='192.168.3.11' # serverport=8887 #在自己電腦測試 serverip='127.0.0.1'", "__name__=='__main__': client() # buffer='POST /post HTTP/1.1\\r\\n' # buffer+='Content-Type:application/json\\r\\n' # 
buffer+='Body:{\\\\\"StuId\\\\\":\\\\\"410785016", "serverip='192.168.3.11' # serverport=8887 #在自己電腦測試 serverip='127.0.0.1' serverport=8888 client=socket(AF_INET,SOCK_STREAM) client.connect((serverip,serverport)) address_file =", "client.send(address.encode()) print(client.recv(1024).decode()) if __name__=='__main__': client() # buffer='POST /post HTTP/1.1\\r\\n' #" ]
[ "**kwargs ) def _next_state_mean(self, states: np.ndarray, t: int) -> np.ndarray:", "= 0.25, domain_extent: float = 32 * np.pi, damping_coeff: float", "additive state noise fields. Larger values correspond to smoother fields.", "the state field by its the Fourier coefficients rather than", "Kassam, Aly-Khan and Trefethen, <NAME>. Fourth-order time-stepping for stiff PDEs.", "Kuramoto and Tsuzuki. Persistent propagation of concentration waves in dissipative", "2 == 0, \"State dimension `dim_state` must be even\" self.time_step", "of integers specifying spatial mesh node indices (indices in to", "observation_noise_std=observation_noise_std, **kwargs ) def _next_state_mean(self, states: np.ndarray, t: int) ->", "+ 1j * initial_state_kernel, False ) def linear_operator(freqs, freqs_sq): return", "self.observation_space_indices = observation_space_indices self.observation_function = observation_function spatial_freqs = np.arange(dim_state //", "each dimension assumed to be independent i.e. a diagonal noise", "8), observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, time_step: float =", "exhibits spatio-temporally chaotic dynamics. The governing stochastic partial differential equation", "import SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4 import FourierETDRK4Integrator from dapy.models.transforms import (", "the kth spatial frequency and `i` the imaginary unit. A", "diagonal noise covariance. initial_state_amplitude: Amplitude scale parameter for initial random", "mesh points. Based on the Kuramato--Sivashinsky PDE model [1, 2]", "PDE model [1, 2] which exhibits spatio-temporally chaotic dynamics. The", "for smoothed noise used to generate initial state and additive", "(dim_state / domain_extent) ** 0.5 ) state_noise_std = rfft_coeff_to_real_array( state_noise_kernel", "Extent (size) of spatial domain. 
damping_coeff: Coefficient (`γ` in description", "κ̃ₖ * dW̃ₖ where `W̃ₖ` is a complex-valued Wiener process,", "spatial_freqs ** 2 spatial_freqs[dim_state // 2] = 0 state_noise_kernel =", "analysis of hydrodynamic instability in laminar flames I. Derivation of", "flame fronts. This model class represents the state field by", "a periodic 1D spatial domain for laminar flame fronts. This", "state noise in model dynamics. Larger values correspond to larger", "in a periodic domain `[0, S)`, `t` the time coordinate,", "partial differential equation (SPDE) is dX = -(∂⁴X/∂s⁴ + ∂²X/∂s²", "2 * np.pi / domain_extent spatial_freqs_sq = spatial_freqs ** 2", "`σ` is a parameter controlling the amplitude and `ℓ` a", "√(M / S) where `σ` is a parameter controlling the", "`i` the imaginary unit. A Fourier-domain exponential time-differencing integrator with", "scale parameter for additive state noise in model dynamics. Larger", "the time coordinate, `X(s, t)` the state field process, `γ`", "initial_state_std = rfft_coeff_to_real_array( initial_state_kernel + 1j * initial_state_kernel, False )", "= fft.irfft(real_array_to_rfft_coeff(states), norm=\"ortho\")[ ..., self.observation_space_indices ] if self.observation_function is None:", "on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally", "4 (1977) pp. 1177–1206. \"\"\" from typing import Union, Optional,", "model class represents the state field by its the Fourier", "Standard deviation of additive Gaussian noise in observations. Either a", "* spatial_freqs_sq * state_noise_length_scale ** 2) * (dim_state / domain_extent)", "values for the initial state. state_noise_amplitude: Amplitude scale parameter for", "the spatial coordinate. Using a spectral spatial discretisation, this corresponds", "Runge-- Kutta updates for non-linear terms [3, 4] is used", "at a given time index. Defaults to identity function in", "X̃ₖ + (i * ωₖ / 2) * DFTₖ(IDFT(X̃)²) +", "used for the Wiener process increment. 
The smoothing kernel Fourier", ") state_noise_std = rfft_coeff_to_real_array( state_noise_kernel + 1j * state_noise_kernel, False", "integrator. \"\"\" assert dim_state % 2 == 0, \"State dimension", "of the SDE dynamics and an Euler-Maruyama discretisation used for", "periodic 1D spatial domain for laminar flame fronts. This model", "integrals in exponential time-differencing plus fourth-order Runge Kutta integrator. \"\"\"", "fields. num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in", "-(∂⁴X/∂s⁴ + ∂²X/∂s² + X * ∂X/∂s + γ *", "flames I. Derivation of basic equations. Acta Astronomica, 4 (1977)", "∂²X/∂s² + X * ∂X/∂s + γ * X) dt", "a diagonal noise covariance. initial_state_amplitude: Amplitude scale parameter for initial", "= rfft_coeff_to_real_array( initial_state_kernel + 1j * initial_state_kernel, False ) def", "parameter for additive state noise in model dynamics. Larger values", "np.ndarray: subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm=\"ortho\")[ ..., self.observation_space_indices ] if self.observation_function", "int = 512, observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8),", "= 1.0, state_noise_amplitude: float = 1.0, state_noise_length_scale: float = 1.0,", "discretisation used for the Wiener process increment. The smoothing kernel", "of basic equations. Acta Astronomica, 4 (1977) pp. 1177–1206. \"\"\"", "laminar flames I. Derivation of basic equations. Acta Astronomica, 4", "the imaginary unit. 
A Fourier-domain exponential time-differencing integrator with 4th", "np.ndarray, t: int) -> np.ndarray: return rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states)) ) def", "Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits", "): \"\"\" Args: dim_state: Dimension of state which is equivalent", "else: dim_observation = observation_function( np.zeros(dim_state)[observation_space_indices], 0 ).shape[0] super().__init__( dim_state=dim_state, dim_observation=dim_observation,", "time_step: Integrator time step. domain_extent: Extent (size) of spatial domain.", "np.ndarray]] = None, time_step: float = 0.25, domain_extent: float =", "= None, time_step: float = 0.25, domain_extent: float = 32", "propagation of concentration waves in dissipative media far from thermal", "A Fourier-domain exponential time-differencing integrator with 4th order Runge-- Kutta", "16, **kwargs ): \"\"\" Args: dim_state: Dimension of state which", "kth spatial frequency and `i` the imaginary unit. A Fourier-domain", "correspond to larger magnitude additive noise in the state field.", "from typing import Union, Optional, Sequence, Callable import numpy as", "`κ(s)` a spatial smoothing kernel and `⊛` indicates circular convolution", "- freqs_sq ** 2 - damping_coeff def nonlinear_operator(v, freqs, freqs_sq):", "in the state field. state_noise_length_scale: Length scale parameter for smoothed", "given time index. Defaults to identity function in first argument.", "(ωₖ² - ωₖ⁴ - γ) * X̃ₖ + (i *", "and Tsuzuki. 
Persistent propagation of concentration waves in dissipative media", "= (ωₖ² - ωₖ⁴ - γ) * X̃ₖ + (i", "Dimension of state which is equivalent here to number of", "**kwargs ): \"\"\" Args: dim_state: Dimension of state which is", "return subsampled_states else: return self.observation_function(subsampled_states, t) class SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,", "2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ where `W̃ₖ` is", "(time_step) ** 0.5 * state_noise_amplitude * np.exp(-0.5 * spatial_freqs_sq *", "Slice or sequence of integers specifying spatial mesh node indices", "domain_extent) ** 0.5 ) state_noise_std = rfft_coeff_to_real_array( state_noise_kernel + 1j", "given state(s) at a given time index. Defaults to identity", "initial_state_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2) *", "to larger magnitude additive noise in the state field. state_noise_length_scale:", "self, dim_state: int = 512, observation_space_indices: Union[slice, Sequence[int]] = slice(4,", "exhibits spatio-temporally chaotic dynamics. References: 1. Kuramoto and Tsuzuki. Persistent", "(size) of spatial domain. damping_coeff: Coefficient (`γ` in description above)", "be independent i.e. a diagonal noise covariance. initial_state_amplitude: Amplitude scale", "SDE dynamics and an Euler-Maruyama discretisation used for the Wiener", "time-differencing plus fourth-order Runge Kutta integrator. \"\"\" assert dim_state %", "to generate initial state and additive state noise fields. 
Larger", "indices (indices in to state vector) corresponding to observation points.", "domain_size=domain_extent, time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, ) if observation_function is None: dim_observation =", "time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, ) if observation_function is None: dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0]", "state vector) corresponding to observation points. observation_function: Function to apply", "domain. damping_coeff: Coefficient (`γ` in description above) controlling degree of", "__init__( self, dim_state: int = 512, observation_space_indices: Union[slice, Sequence[int]] =", ") self.integrator = FourierETDRK4Integrator( linear_operator=linear_operator, nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state, domain_size=domain_extent, time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator,", "/ 2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ where `W̃ₖ`", "or sequence of integers specifying spatial mesh node indices (indices", "integrator with 4th order Runge-- Kutta updates for non-linear terms", "spatial_freqs = np.arange(dim_state // 2 + 1) * 2 *", "noise fields. Larger values correspond to smoother fields. num_roots_of_unity_etdrk4_integrator: Number", "model dynamics. Larger values correspond to larger magnitude additive noise", "smoothing kernel and `⊛` indicates circular convolution in the spatial", "26.4 (2005): 1214-1233. 4. Cox, <NAME>. and Matthews, <NAME>. Exponential", "used to generate initial state and additive state noise fields.", "PDEs. SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233. 4.", "frequency and `i` the imaginary unit. 
A Fourier-domain exponential time-differencing", "coefficients rather than values of the state field at the", "-> np.ndarray: subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm=\"ortho\")[ ..., self.observation_space_indices ] if", "super().__init__( dim_state=dim_state, dim_observation=dim_observation, initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std, observation_noise_std=observation_noise_std, **kwargs ) def", "** 2, norm=\"ortho\") ) self.integrator = FourierETDRK4Integrator( linear_operator=linear_operator, nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state,", "`κ̃ₖ` the kth Fourier coefficient of the smoothing kernel `κ`,", "dynamics, `W(s, t)` a space-time white noise process, `κ(s)` a", "mesh points rather than the corresponding Fourier coefficients. For more", "compute mean of observation(s) given state(s) at a given time", "time-differencing integrator with 4th order Runge-- Kutta updates for non-linear", "damping in dynamics. observation_noise_std: Standard deviation of additive Gaussian noise", "of concentration waves in dissipative media far from thermal equilibrium.", "class represents the state field by its the Fourier coefficients", "== 0, \"State dimension `dim_state` must be even\" self.time_step =", "observation_function( np.zeros(dim_state)[observation_space_indices], 0 ).shape[0] super().__init__( dim_state=dim_state, dim_observation=dim_observation, initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std,", "This model class represents the state field by its the", "self.observation_space_indices ] if self.observation_function is None: return subsampled_states else: return", "a periodic domain `[0, S)`, `t` the time coordinate, `X(s,", "the length scale. References: 1. Kuramoto and Tsuzuki. Persistent propagation", "additive Gaussian noise in observations. Either a scalar or array", "1214-1233. 4. 
Cox, <NAME>. and Matthews, <NAME>. Exponential time differencing", "32 * np.pi, damping_coeff: float = 1.0 / 6, observation_noise_std:", "correspond to smoother fields. num_roots_of_unity_etdrk4_integrator: Number of roots of unity", "must be even\" self.time_step = time_step self.observation_space_indices = observation_space_indices self.observation_function", "is None: return subsampled_states else: return self.observation_function(subsampled_states, t) class SpatialLaminarFlameModel(", "/ S) where `σ` is a parameter controlling the amplitude", "fft.rfft(fft.irfft(v, norm=\"ortho\") ** 2, norm=\"ortho\") ) self.integrator = FourierETDRK4Integrator( linear_operator=linear_operator,", "exp(-ωₖ² * ℓ²) * √(M / S) where `σ` is", "observation_function is None: dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0] else: dim_observation = observation_function(", "= slice(4, None, 8), observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None,", "Sequence[int]] = slice(4, None, 8), observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] =", "convolution in the spatial coordinate. Using a spectral spatial discretisation,", "to larger magnitude values for the initial state. state_noise_amplitude: Amplitude", "on Scientific Computing 26.4 (2005): 1214-1233. 4. Cox, <NAME>. and", "`dim_state` must be even\" self.time_step = time_step self.observation_space_indices = observation_space_indices", "0.25, domain_extent: float = 32 * np.pi, damping_coeff: float =", "* (dim_state / domain_extent) ** 0.5 ) state_noise_std = rfft_coeff_to_real_array(", "to a non-linear system of stochastic differential equations (SDEs) in", "unit. 
A Fourier-domain exponential time-differencing integrator with 4th order Runge--", "t)` a space-time white noise process, `κ(s)` a spatial smoothing", "if observation_function is None: dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0] else: dim_observation =", "the Fourier coefficients X̃ₖ dX̃ₖ = (ωₖ² - ωₖ⁴ -", "magnitude additive noise in the state field. state_noise_length_scale: Length scale", "norm=\"ortho\") ** 2, norm=\"ortho\") ) self.integrator = FourierETDRK4Integrator( linear_operator=linear_operator, nonlinear_operator=nonlinear_operator,", "[1, 2] which exhibits spatio-temporally chaotic dynamics. The governing stochastic", "state_noise_amplitude: float = 1.0, state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator: int", "observation(s) given state(s) at a given time index. Defaults to", "above) controlling degree of damping in dynamics. observation_noise_std: Standard deviation", "= observation_space_indices self.observation_function = observation_function spatial_freqs = np.arange(dim_state // 2", "dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0] else: dim_observation = observation_function( np.zeros(dim_state)[observation_space_indices], 0 ).shape[0]", "time-differencing plus fourth-order Runge Kutta integrator. \"\"\" super().__init__( dim_state=dim_state, observation_space_indices=observation_space_indices,", "in the Fourier coefficients X̃ₖ dX̃ₖ = (ωₖ² - ωₖ⁴", "from dapy.models.base import AbstractDiagonalGaussianModel from dapy.models.spatial import SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4", "of damping in dynamics. 
observation_noise_std: Standard deviation of additive Gaussian", "state field process, `γ` a coefficient controlling the degree of", "- ωₖ⁴ - γ) * X̃ₖ + (i * ωₖ", "int = 16, ): \"\"\" Args: dim_state: Dimension of state", "dapy.models.spatial import SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4 import FourierETDRK4Integrator from dapy.models.transforms import", "SPDE model on a periodic 1D spatial domain for laminar", "-0.5j * freqs * fft.rfft(fft.irfft(v, norm=\"ortho\") ** 2, norm=\"ortho\") )", "the Wiener process increment. The smoothing kernel Fourier coefficients are", "state_noise_std=state_noise_std, observation_noise_std=observation_noise_std, **kwargs ) def _next_state_mean(self, states: np.ndarray, t: int)", "import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array, ) class FourierLaminarFlameModel(AbstractDiagonalGaussianModel): \"\"\"Non-linear", "4] is used to integrate the deterministic component of the", "np.ndarray: return rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states)) ) def _observation_mean(self, states: np.ndarray, t:", "= σ * exp(-ωₖ² * ℓ²) * √(M / S)", "spatial mesh points. Based on the Kuramato--Sivashinsky PDE model [1,", "Gaussian noise in observations. Either a scalar or array of", "Kutta integrator. 
\"\"\" super().__init__( dim_state=dim_state, observation_space_indices=observation_space_indices, observation_function=observation_function, time_step=time_step, domain_extent=domain_extent, damping_coeff=damping_coeff,", "np.zeros(dim_state)[observation_space_indices].shape[0] else: dim_observation = observation_function( np.zeros(dim_state)[observation_space_indices], 0 ).shape[0] super().__init__( dim_state=dim_state,", "Fourier coefficients X̃ₖ dX̃ₖ = (ωₖ² - ωₖ⁴ - γ)", "1j * state_noise_kernel, False ) initial_state_kernel = ( initial_state_amplitude *", "* exp(-ωₖ² * ℓ²) * √(M / S) where `σ`", "initial state and additive state noise fields. Larger values correspond", "OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel, ): \"\"\"Non-linear SPDE model on a periodic 1D", "used to integrate the deterministic component of the SDE dynamics", "Fourier coefficient of the smoothing kernel `κ`, `ωₖ = 2", "norm=\"ortho\") ) self.integrator = FourierETDRK4Integrator( linear_operator=linear_operator, nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state, domain_size=domain_extent, time_step=time_step,", "Kutta updates for non-linear terms [3, 4] is used to", "float = 1.0, state_noise_amplitude: float = 1.0, state_noise_length_scale: float =", "/ 6, observation_noise_std: float = 0.5, initial_state_amplitude: float = 1.0,", "field process, `γ` a coefficient controlling the degree of damping", "nonlinear_operator(v, freqs, freqs_sq): return ( -0.5j * freqs * fft.rfft(fft.irfft(v,", "apply to subsampled state field to compute mean of observation(s)", "in approximating contour integrals in exponential time-differencing plus fourth-order Runge", "basic equations. Acta Astronomica, 4 (1977) pp. 1177–1206. 
\"\"\" from", "γ) * X̃ₖ + (i * ωₖ / 2) *", "a non-linear system of stochastic differential equations (SDEs) in the", "= 32 * np.pi, damping_coeff: float = 1.0 / 6,", "of roots of unity to use in approximating contour integrals", "periodic 1D spatial domain for laminar wave fronts. Based on", "process increment. The smoothing kernel Fourier coefficients are assumed to", "see the docstring of `FourierLaminarFlameModel`. \"\"\" def __init__( self, dim_state:", "state_noise_length_scale: Length scale parameter for smoothed noise used to generate", "= 0 state_noise_kernel = ( (time_step) ** 0.5 * state_noise_amplitude", "coefficient of the smoothing kernel `κ`, `ωₖ = 2 *", "values of the state field at the spatial mesh points.", "* state_noise_kernel, False ) initial_state_kernel = ( initial_state_amplitude * np.exp(-0.5", "dim_state % 2 == 0, \"State dimension `dim_state` must be", "is equivalent here to number of mesh points in spatial", "* ωₖ / 2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ", "fourth-order Runge Kutta integrator. \"\"\" assert dim_state % 2 ==", "in laminar flames I. Derivation of basic equations. Acta Astronomica,", "noise process, `κ(s)` a spatial smoothing kernel and `⊛` indicates", "<NAME>. and Matthews, <NAME>. Exponential time differencing for stiff systems.", "updates for non-linear terms [3, 4] is used to integrate", "Computational Physics 176.2 (2002): 430-455. \"\"\" def __init__( self, dim_state:", "integers specifying spatial mesh node indices (indices in to state", "in dynamics. 
observation_noise_std: Standard deviation of additive Gaussian noise in", "dim_state=dim_state, dim_observation=dim_observation, initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std, observation_noise_std=observation_noise_std, **kwargs ) def _next_state_mean(self,", "] if self.observation_function is None: return subsampled_states else: return self.observation_function(subsampled_states,", "component of the SDE dynamics and an Euler-Maruyama discretisation used", "index. Defaults to identity function in first argument. time_step: Integrator", "discretisation, this corresponds to a non-linear system of stochastic differential", "on a periodic 1D spatial domain for laminar flame fronts.", "noise in observations. Either a scalar or array of shape", "in exponential time-differencing plus fourth-order Runge Kutta integrator. \"\"\" assert", "integrator. \"\"\" super().__init__( dim_state=dim_state, observation_space_indices=observation_space_indices, observation_function=observation_function, time_step=time_step, domain_extent=domain_extent, damping_coeff=damping_coeff, observation_noise_std=observation_noise_std,", "states: np.ndarray, t: int) -> np.ndarray: subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm=\"ortho\")[", "4 (1977) pp. 1177–1206. 3. Kassam, Aly-Khan and Trefethen, <NAME>.", "initial_state_amplitude: float = 1.0, state_noise_amplitude: float = 1.0, state_noise_length_scale: float", "% 2 == 0, \"State dimension `dim_state` must be even\"", "None, time_step: float = 0.25, domain_extent: float = 32 *", "2. Sivashinsky. 
Nonlinear analysis of hydrodynamic instability in laminar flames", "`X(s, t)` the state field process, `γ` a coefficient controlling", "def __init__( self, dim_state: int = 512, observation_space_indices: Union[slice, Sequence[int]]", "(i * ωₖ / 2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ *", "** 0.5 * state_noise_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale", "(2002): 430-455. \"\"\" def __init__( self, dim_state: int = 512,", "first argument. time_step: Integrator time step. domain_extent: Extent (size) of", "/ domain_extent spatial_freqs_sq = spatial_freqs ** 2 spatial_freqs[dim_state // 2]", ") initial_state_kernel = ( initial_state_amplitude * np.exp(-0.5 * spatial_freqs_sq *", "= 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, ): \"\"\" Args: dim_state:", "the docstring of `FourierLaminarFlameModel`. \"\"\" def __init__( self, dim_state: int", ") initial_state_std = rfft_coeff_to_real_array( initial_state_kernel + 1j * initial_state_kernel, False", "Trefethen, <NAME>. Fourth-order time-stepping for stiff PDEs. 
SIAM Journal on", "dim_state=dim_state, observation_space_indices=observation_space_indices, observation_function=observation_function, time_step=time_step, domain_extent=domain_extent, damping_coeff=damping_coeff, observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude, state_noise_amplitude=state_noise_amplitude, state_noise_length_scale=state_noise_length_scale,", "self.observation_function(subsampled_states, t) class SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel, ): \"\"\"Non-linear SPDE", "observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude, state_noise_amplitude=state_noise_amplitude, state_noise_length_scale=state_noise_length_scale, num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator, mesh_shape=(dim_state,), domain_extents=(domain_extent,), domain_is_periodic=True, observation_node_indices=observation_space_indices, )", "freqs_sq ** 2 - damping_coeff def nonlinear_operator(v, freqs, freqs_sq): return", "equilibrium. Progress in Theoretical Physcs, 55 (1976) pp. 356–369. 2.", "ωₖ / 2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ where", "`κ`, `ωₖ = 2 * pi * k / S`", "to apply to subsampled state field to compute mean of", "time step. domain_extent: Extent (size) of spatial domain. damping_coeff: Coefficient", "(1977) pp. 1177–1206. \"\"\" from typing import Union, Optional, Sequence,", "state_noise_kernel, False ) initial_state_kernel = ( initial_state_amplitude * np.exp(-0.5 *", "coefficients. For more details see the docstring of `FourierLaminarFlameModel`. \"\"\"", "rather than values of the state field at the spatial", "and `⊛` indicates circular convolution in the spatial coordinate. Using", "SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233. 4. 
Cox,", "** 0.5 ) state_noise_std = rfft_coeff_to_real_array( state_noise_kernel + 1j *", "of damping in the dynamics, `W(s, t)` a space-time white", "smoother fields. num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use", "2] = 0 state_noise_kernel = ( (time_step) ** 0.5 *", "terms [3, 4] is used to integrate the deterministic component", "+ ∂²X/∂s² + X * ∂X/∂s + γ * X)", "dW̃ₖ where `W̃ₖ` is a complex-valued Wiener process, `κ̃ₖ` the", "dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² + X * ∂X/∂s +", "where `s` is the spatial coordinate in a periodic domain", "equations (SDEs) in the Fourier coefficients X̃ₖ dX̃ₖ = (ωₖ²", "initial_state_amplitude: Amplitude scale parameter for initial random state field. Larger", "Scientific Computing 26.4 (2005): 1214-1233. 4. Cox, <NAME>. and Matthews,", "Wiener process increment. The smoothing kernel Fourier coefficients are assumed", "The governing stochastic partial differential equation (SPDE) is dX =", "def _observation_mean(self, states: np.ndarray, t: int) -> np.ndarray: subsampled_states =", "kernel Fourier coefficients are assumed to be κ̃ₖ = σ", "domain for laminar flame fronts. This model class represents the", "state. state_noise_amplitude: Amplitude scale parameter for additive state noise in", "= rfft_coeff_to_real_array( state_noise_kernel + 1j * state_noise_kernel, False ) initial_state_kernel", "and `i` the imaginary unit. A Fourier-domain exponential time-differencing integrator", "1.0, num_roots_of_unity_etdrk4_integrator: int = 16, ): \"\"\" Args: dim_state: Dimension", "in the dynamics, `W(s, t)` a space-time white noise process,", "Noise in each dimension assumed to be independent i.e. a", "mesh points in spatial discretization. observation_space_indices: Slice or sequence of", "larger magnitude additive noise in the state field. state_noise_length_scale: Length", "shape `(dim_observation,)`. 
Noise in each dimension assumed to be independent", "* 2 * np.pi / domain_extent spatial_freqs_sq = spatial_freqs **", "states: np.ndarray, t: int) -> np.ndarray: return rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states)) )", "controlling the amplitude and `ℓ` a parameter controlling the length", "its values at the spatial mesh points rather than the", "spatial smoothing kernel and `⊛` indicates circular convolution in the", "* X̃ₖ + (i * ωₖ / 2) * DFTₖ(IDFT(X̃)²)", "initial_state_kernel + 1j * initial_state_kernel, False ) def linear_operator(freqs, freqs_sq):", "fields. Larger values correspond to smoother fields. num_roots_of_unity_etdrk4_integrator: Number of", "by its the Fourier coefficients rather than values of the", "X̃ₖ dX̃ₖ = (ωₖ² - ωₖ⁴ - γ) * X̃ₖ", "Progress in Theoretical Physcs, 55 (1976) pp. 356–369. 2. Sivashinsky.", "the dynamics, `W(s, t)` a space-time white noise process, `κ(s)`", "rfft_coeff_to_real_array, ) class FourierLaminarFlameModel(AbstractDiagonalGaussianModel): \"\"\"Non-linear SPDE model on a periodic", "** 0.5 ) initial_state_std = rfft_coeff_to_real_array( initial_state_kernel + 1j *", "of Computational Physics 176.2 (2002): 430-455. \"\"\" def __init__( self,", "observation points. observation_function: Function to apply to subsampled state field", "state field. Larger values correspond to larger magnitude values for", "512, observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8), observation_function: Optional[Callable[[np.ndarray,", "parameter for smoothed noise used to generate initial state and", "spatial_freqs_sq = spatial_freqs ** 2 spatial_freqs[dim_state // 2] = 0", "float = 1.0, state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator: int =", "spatial mesh node indices (indices in to state vector) corresponding", "vector) corresponding to observation points. 
observation_function: Function to apply to", "observation_noise_std: Standard deviation of additive Gaussian noise in observations. Either", "smoothed noise used to generate initial state and additive state", "covariance. initial_state_amplitude: Amplitude scale parameter for initial random state field.", "time_step: float = 0.25, domain_extent: float = 32 * np.pi,", "where `W̃ₖ` is a complex-valued Wiener process, `κ̃ₖ` the kth", "node indices (indices in to state vector) corresponding to observation", "self.observation_function = observation_function spatial_freqs = np.arange(dim_state // 2 + 1)", "the spatial mesh points. Based on the Kuramato--Sivashinsky PDE model", "additive state noise in model dynamics. Larger values correspond to", "* freqs * fft.rfft(fft.irfft(v, norm=\"ortho\") ** 2, norm=\"ortho\") ) self.integrator", "initial_state_kernel = ( initial_state_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale", "spatial mesh points rather than the corresponding Fourier coefficients. For", "Function to apply to subsampled state field to compute mean", "dim_state: int = 512, observation_space_indices: Union[slice, Sequence[int]] = slice(4, None,", "`W(s, t)` a space-time white noise process, `κ(s)` a spatial", "length scale. References: 1. Kuramoto and Tsuzuki. Persistent propagation of", "Physics 176.2 (2002): 430-455. \"\"\" def __init__( self, dim_state: int", "the amplitude and `ℓ` a parameter controlling the length scale.", "contour integrals in exponential time-differencing plus fourth-order Runge Kutta integrator.", "Callable import numpy as np from dapy.models.base import AbstractDiagonalGaussianModel from", "values at the spatial mesh points rather than the corresponding", "Length scale parameter for smoothed noise used to generate initial", "random state field. Larger values correspond to larger magnitude values", "which exhibits spatio-temporally chaotic dynamics. 
The governing stochastic partial differential", "approximating contour integrals in exponential time-differencing plus fourth-order Runge Kutta", "the SDE dynamics and an Euler-Maruyama discretisation used for the", "assumed to be independent i.e. a diagonal noise covariance. initial_state_amplitude:", "freqs_sq): return freqs_sq - freqs_sq ** 2 - damping_coeff def", "freqs * fft.rfft(fft.irfft(v, norm=\"ortho\") ** 2, norm=\"ortho\") ) self.integrator =", "the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally chaotic", "spatial domain for laminar wave fronts. Based on the Kuramato--Sivashinsky", "are assumed to be κ̃ₖ = σ * exp(-ωₖ² *", "dynamics. References: 1. Kuramoto and Tsuzuki. Persistent propagation of concentration", "in exponential time-differencing plus fourth-order Runge Kutta integrator. \"\"\" super().__init__(", "hydrodynamic instability in laminar flames I. Derivation of basic equations.", "Acta Astronomica, 4 (1977) pp. 1177–1206. \"\"\" from typing import", "at the spatial mesh points. Based on the Kuramato--Sivashinsky PDE", "a parameter controlling the amplitude and `ℓ` a parameter controlling", "/ S` the kth spatial frequency and `i` the imaginary", "_next_state_mean(self, states: np.ndarray, t: int) -> np.ndarray: return rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states))", "rfft_coeff_to_real_array( initial_state_kernel + 1j * initial_state_kernel, False ) def linear_operator(freqs,", "* ∂X/∂s + γ * X) dt + κ ⊛", "+ 1j * state_noise_kernel, False ) initial_state_kernel = ( initial_state_amplitude", "np.ndarray, t: int) -> np.ndarray: subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm=\"ortho\")[ ...,", "Fourier coefficients. 
For more details see the docstring of `FourierLaminarFlameModel`.", "freqs_sq - freqs_sq ** 2 - damping_coeff def nonlinear_operator(v, freqs,", "DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ where `W̃ₖ` is a complex-valued", ") def _next_state_mean(self, states: np.ndarray, t: int) -> np.ndarray: return", "* ℓ²) * √(M / S) where `σ` is a", "+ (i * ωₖ / 2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ", "subsampled state field to compute mean of observation(s) given state(s)", "Sequence, Callable import numpy as np from dapy.models.base import AbstractDiagonalGaussianModel", "in model dynamics. Larger values correspond to larger magnitude additive", "larger magnitude values for the initial state. state_noise_amplitude: Amplitude scale", "Euler-Maruyama discretisation used for the Wiener process increment. The smoothing", "freqs, freqs_sq): return ( -0.5j * freqs * fft.rfft(fft.irfft(v, norm=\"ortho\")", "dimension assumed to be independent i.e. a diagonal noise covariance.", "self.integrator = FourierETDRK4Integrator( linear_operator=linear_operator, nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state, domain_size=domain_extent, time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, )", "Runge Kutta integrator. \"\"\" super().__init__( dim_state=dim_state, observation_space_indices=observation_space_indices, observation_function=observation_function, time_step=time_step, domain_extent=domain_extent,", "fronts. Based on the Kuramato--Sivashinsky PDE model [1, 2] which", "for the initial state. 
state_noise_amplitude: Amplitude scale parameter for additive", "mean of observation(s) given state(s) at a given time index.", "spatial coordinate in a periodic domain `[0, S)`, `t` the", "observation_noise_std: float = 0.5, initial_state_amplitude: float = 1.0, state_noise_amplitude: float", "Using a spectral spatial discretisation, this corresponds to a non-linear", "= -(∂⁴X/∂s⁴ + ∂²X/∂s² + X * ∂X/∂s + γ", "by its values at the spatial mesh points rather than", "+ X * ∂X/∂s + γ * X) dt +", "deterministic component of the SDE dynamics and an Euler-Maruyama discretisation", "for the Wiener process increment. The smoothing kernel Fourier coefficients", "a spectral spatial discretisation, this corresponds to a non-linear system", "1177–1206. 3. Kassam, Aly-Khan and Trefethen, <NAME>. Fourth-order time-stepping for", "to observation points. observation_function: Function to apply to subsampled state", "* DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ where `W̃ₖ` is a", "\"State dimension `dim_state` must be even\" self.time_step = time_step self.observation_space_indices", "1j * initial_state_kernel, False ) def linear_operator(freqs, freqs_sq): return freqs_sq", "state field by its the Fourier coefficients rather than values", "model class represents the state field by its values at", "order Runge-- Kutta updates for non-linear terms [3, 4] is", "in the spatial coordinate. Using a spectral spatial discretisation, this", "dW where `s` is the spatial coordinate in a periodic", "float = 0.25, domain_extent: float = 32 * np.pi, damping_coeff:", "float = 32 * np.pi, damping_coeff: float = 1.0 /", "Astronomica, 4 (1977) pp. 1177–1206. 3. Kassam, Aly-Khan and Trefethen,", "<NAME>. Fourth-order time-stepping for stiff PDEs. 
SIAM Journal on Scientific", "a coefficient controlling the degree of damping in the dynamics,", "num_mesh_point=dim_state, domain_size=domain_extent, time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, ) if observation_function is None: dim_observation", "float = 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, ): \"\"\" Args:", "state_noise_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2) *", "is dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² + X * ∂X/∂s", "degree of damping in the dynamics, `W(s, t)` a space-time", "of mesh points in spatial discretization. observation_space_indices: Slice or sequence", "stochastic partial differential equation (SPDE) is dX = -(∂⁴X/∂s⁴ +", "= 16, ): \"\"\" Args: dim_state: Dimension of state which", "\"\"\" from typing import Union, Optional, Sequence, Callable import numpy", "class SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel, ): \"\"\"Non-linear SPDE model on", "equivalent here to number of mesh points in spatial discretization.", "np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2) * (dim_state /", "non-linear system of stochastic differential equations (SDEs) in the Fourier", "Number of roots of unity to use in approximating contour", "differencing for stiff systems. Journal of Computational Physics 176.2 (2002):", "discretization. observation_space_indices: Slice or sequence of integers specifying spatial mesh", "state_noise_length_scale ** 2) * (dim_state / domain_extent) ** 0.5 )", "thermal equilibrium. Progress in Theoretical Physcs, 55 (1976) pp. 356–369.", "corresponds to a non-linear system of stochastic differential equations (SDEs)", "int) -> np.ndarray: return rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states)) ) def _observation_mean(self, states:", "spatial_freqs_sq * state_noise_length_scale ** 2) * (dim_state / domain_extent) **", "pp. 
1177–1206. 3. Kassam, Aly-Khan and Trefethen, <NAME>. Fourth-order time-stepping", "Computing 26.4 (2005): 1214-1233. 4. Cox, <NAME>. and Matthews, <NAME>.", "domain `[0, S)`, `t` the time coordinate, `X(s, t)` the", "import AbstractDiagonalGaussianModel from dapy.models.spatial import SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4 import FourierETDRK4Integrator", "The smoothing kernel Fourier coefficients are assumed to be κ̃ₖ", "field at the spatial mesh points. Based on the Kuramato--Sivashinsky", "Wiener process, `κ̃ₖ` the kth Fourier coefficient of the smoothing", "state field by its values at the spatial mesh points", "the state field at the spatial mesh points. Based on", "correspond to larger magnitude values for the initial state. state_noise_amplitude:", "False ) def linear_operator(freqs, freqs_sq): return freqs_sq - freqs_sq **", "* √(M / S) where `σ` is a parameter controlling", "self.time_step = time_step self.observation_space_indices = observation_space_indices self.observation_function = observation_function spatial_freqs", "for additive state noise in model dynamics. Larger values correspond", "FourierETDRK4Integrator from dapy.models.transforms import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array, )", "slice(4, None, 8), observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, time_step:", "= 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, **kwargs ): \"\"\" Args:", "2) * (dim_state / domain_extent) ** 0.5 ) initial_state_std =", "176.2 (2002): 430-455. 
\"\"\" def __init__( self, dim_state: int =", "AbstractDiagonalGaussianModel from dapy.models.spatial import SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4 import FourierETDRK4Integrator from", ") if observation_function is None: dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0] else: dim_observation", "= time_step self.observation_space_indices = observation_space_indices self.observation_function = observation_function spatial_freqs =", "return ( -0.5j * freqs * fft.rfft(fft.irfft(v, norm=\"ortho\") ** 2,", "chaotic dynamics. References: 1. Kuramoto and Tsuzuki. Persistent propagation of", "equations. Acta Astronomica, 4 (1977) pp. 1177–1206. \"\"\" from typing", "observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8), observation_function: Optional[Callable[[np.ndarray, int],", "2) * (dim_state / domain_extent) ** 0.5 ) state_noise_std =", "laminar wave fronts. Based on the Kuramato--Sivashinsky PDE model [1,", "- damping_coeff def nonlinear_operator(v, freqs, freqs_sq): return ( -0.5j *", "= spatial_freqs ** 2 spatial_freqs[dim_state // 2] = 0 state_noise_kernel", "the smoothing kernel `κ`, `ωₖ = 2 * pi *", "damping_coeff: float = 1.0 / 6, observation_noise_std: float = 0.5,", "0.5, initial_state_amplitude: float = 1.0, state_noise_amplitude: float = 1.0, state_noise_length_scale:", "state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, ): \"\"\"", "a complex-valued Wiener process, `κ̃ₖ` the kth Fourier coefficient of", "to identity function in first argument. time_step: Integrator time step.", "plus fourth-order Runge Kutta integrator. \"\"\" assert dim_state % 2", "corresponding Fourier coefficients. For more details see the docstring of", "( -0.5j * freqs * fft.rfft(fft.irfft(v, norm=\"ortho\") ** 2, norm=\"ortho\")", "and additive state noise fields. Larger values correspond to smoother", "noise in the state field. 
state_noise_length_scale: Length scale parameter for", "t: int) -> np.ndarray: return rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states)) ) def _observation_mean(self,", "`FourierLaminarFlameModel`. \"\"\" def __init__( self, dim_state: int = 512, observation_space_indices:", "assumed to be κ̃ₖ = σ * exp(-ωₖ² * ℓ²)", "state field. state_noise_length_scale: Length scale parameter for smoothed noise used", "state_noise_amplitude: Amplitude scale parameter for additive state noise in model", "Fourier coefficients rather than values of the state field at", "Coefficient (`γ` in description above) controlling degree of damping in", "use in approximating contour integrals in exponential time-differencing plus fourth-order", "parameter controlling the amplitude and `ℓ` a parameter controlling the", "to use in approximating contour integrals in exponential time-differencing plus", "* k / S` the kth spatial frequency and `i`", "kth Fourier coefficient of the smoothing kernel `κ`, `ωₖ =", "rather than the corresponding Fourier coefficients. For more details see", "Physcs, 55 (1976) pp. 356–369. 2. Sivashinsky. Nonlinear analysis of", "chaotic dynamics. The governing stochastic partial differential equation (SPDE) is", "1) * 2 * np.pi / domain_extent spatial_freqs_sq = spatial_freqs", "coefficients X̃ₖ dX̃ₖ = (ωₖ² - ωₖ⁴ - γ) *", "even\" self.time_step = time_step self.observation_space_indices = observation_space_indices self.observation_function = observation_function", "Theoretical Physcs, 55 (1976) pp. 356–369. 2. Sivashinsky. Nonlinear analysis", "// 2] = 0 state_noise_kernel = ( (time_step) ** 0.5", "roots of unity to use in approximating contour integrals in", "circular convolution in the spatial coordinate. Using a spectral spatial", "array of shape `(dim_observation,)`. Noise in each dimension assumed to", "a periodic 1D spatial domain for laminar wave fronts. Based", "Derivation of basic equations. Acta Astronomica, 4 (1977) pp. 
1177–1206.", "domain_extent spatial_freqs_sq = spatial_freqs ** 2 spatial_freqs[dim_state // 2] =", "of stochastic differential equations (SDEs) in the Fourier coefficients X̃ₖ", "356–369. 2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar", "2 - damping_coeff def nonlinear_operator(v, freqs, freqs_sq): return ( -0.5j", "details see the docstring of `FourierLaminarFlameModel`. \"\"\" def __init__( self,", "dynamics. Larger values correspond to larger magnitude additive noise in", "corresponding to observation points. observation_function: Function to apply to subsampled", "`γ` a coefficient controlling the degree of damping in the", "or array of shape `(dim_observation,)`. Noise in each dimension assumed", "specifying spatial mesh node indices (indices in to state vector)", "** 2 spatial_freqs[dim_state // 2] = 0 state_noise_kernel = (", "2] which exhibits spatio-temporally chaotic dynamics. References: 1. Kuramoto and", "Matthews, <NAME>. Exponential time differencing for stiff systems. Journal of", "in observations. Either a scalar or array of shape `(dim_observation,)`.", "for initial random state field. Larger values correspond to larger", "state which is equivalent here to number of mesh points", "-> np.ndarray: return rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states)) ) def _observation_mean(self, states: np.ndarray,", "fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array, ) class FourierLaminarFlameModel(AbstractDiagonalGaussianModel): \"\"\"Non-linear SPDE model on", "unity to use in approximating contour integrals in exponential time-differencing", "_observation_mean(self, states: np.ndarray, t: int) -> np.ndarray: subsampled_states = fft.irfft(real_array_to_rfft_coeff(states),", "than the corresponding Fourier coefficients. For more details see the", "Args: dim_state: Dimension of state which is equivalent here to", "fourth-order Runge Kutta integrator. 
\"\"\" super().__init__( dim_state=dim_state, observation_space_indices=observation_space_indices, observation_function=observation_function, time_step=time_step,", "Fourier-domain exponential time-differencing integrator with 4th order Runge-- Kutta updates", "ℓ²) * √(M / S) where `σ` is a parameter", "where `σ` is a parameter controlling the amplitude and `ℓ`", "Larger values correspond to smoother fields. num_roots_of_unity_etdrk4_integrator: Number of roots", "( (time_step) ** 0.5 * state_noise_amplitude * np.exp(-0.5 * spatial_freqs_sq", "( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array, ) class FourierLaminarFlameModel(AbstractDiagonalGaussianModel): \"\"\"Non-linear SPDE", "to be κ̃ₖ = σ * exp(-ωₖ² * ℓ²) *", "1.0, state_noise_amplitude: float = 1.0, state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator:", "state and additive state noise fields. Larger values correspond to", "float = 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, **kwargs ): \"\"\"", "self.integrator.step(real_array_to_rfft_coeff(states)) ) def _observation_mean(self, states: np.ndarray, t: int) -> np.ndarray:", "degree of damping in dynamics. observation_noise_std: Standard deviation of additive", "Amplitude scale parameter for initial random state field. Larger values", "in dissipative media far from thermal equilibrium. Progress in Theoretical", "more details see the docstring of `FourierLaminarFlameModel`. \"\"\" def __init__(", "dapy.models.transforms import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array, ) class FourierLaminarFlameModel(AbstractDiagonalGaussianModel):", "on a periodic 1D spatial domain for laminar wave fronts.", "def linear_operator(freqs, freqs_sq): return freqs_sq - freqs_sq ** 2 -", "scale parameter for initial random state field. Larger values correspond", "a given time index. 
Defaults to identity function in first", "1D spatial domain for laminar wave fronts. Based on the", "= 1.0 / 6, observation_noise_std: float = 0.5, initial_state_amplitude: float", "nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state, domain_size=domain_extent, time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, ) if observation_function is None:", "`⊛` indicates circular convolution in the spatial coordinate. Using a", "observation_function=observation_function, time_step=time_step, domain_extent=domain_extent, damping_coeff=damping_coeff, observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude, state_noise_amplitude=state_noise_amplitude, state_noise_length_scale=state_noise_length_scale, num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator, mesh_shape=(dim_state,),", "Larger values correspond to larger magnitude additive noise in the", "dim_observation=dim_observation, initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std, observation_noise_std=observation_noise_std, **kwargs ) def _next_state_mean(self, states:", "domain_extent=domain_extent, damping_coeff=damping_coeff, observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude, state_noise_amplitude=state_noise_amplitude, state_noise_length_scale=state_noise_length_scale, num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator, mesh_shape=(dim_state,), domain_extents=(domain_extent,), domain_is_periodic=True,", "points in spatial discretization. observation_space_indices: Slice or sequence of integers", "complex-valued Wiener process, `κ̃ₖ` the kth Fourier coefficient of the", "1D spatial domain for laminar flame fronts. This model class", "<NAME>. Exponential time differencing for stiff systems. Journal of Computational", "of observation(s) given state(s) at a given time index. Defaults", "time index. 
Defaults to identity function in first argument. time_step:", "= observation_function( np.zeros(dim_state)[observation_space_indices], 0 ).shape[0] super().__init__( dim_state=dim_state, dim_observation=dim_observation, initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state),", "smoothing kernel Fourier coefficients are assumed to be κ̃ₖ =", ") def _observation_mean(self, states: np.ndarray, t: int) -> np.ndarray: subsampled_states", "controlling degree of damping in dynamics. observation_noise_std: Standard deviation of", "X * ∂X/∂s + γ * X) dt + κ", "+ κ ⊛ dW where `s` is the spatial coordinate", "deviation of additive Gaussian noise in observations. Either a scalar", "rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states)) ) def _observation_mean(self, states: np.ndarray, t: int) ->", "a spatial smoothing kernel and `⊛` indicates circular convolution in", "kernel and `⊛` indicates circular convolution in the spatial coordinate.", "num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in approximating", "0 state_noise_kernel = ( (time_step) ** 0.5 * state_noise_amplitude *", "periodic domain `[0, S)`, `t` the time coordinate, `X(s, t)`", "* state_noise_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)", "dimension `dim_state` must be even\" self.time_step = time_step self.observation_space_indices =", "white noise process, `κ(s)` a spatial smoothing kernel and `⊛`", "\"\"\" super().__init__( dim_state=dim_state, observation_space_indices=observation_space_indices, observation_function=observation_function, time_step=time_step, domain_extent=domain_extent, damping_coeff=damping_coeff, observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude,", "to integrate the deterministic component of the SDE dynamics and", "coordinate in a periodic domain `[0, S)`, `t` the time", "in spatial discretization. 
observation_space_indices: Slice or sequence of integers specifying", "S) where `σ` is a parameter controlling the amplitude and", "return rfft_coeff_to_real_array( self.integrator.step(real_array_to_rfft_coeff(states)) ) def _observation_mean(self, states: np.ndarray, t: int)", "indicates circular convolution in the spatial coordinate. Using a spectral", "concentration waves in dissipative media far from thermal equilibrium. Progress", "is a complex-valued Wiener process, `κ̃ₖ` the kth Fourier coefficient", "in description above) controlling degree of damping in dynamics. observation_noise_std:", "int], np.ndarray]] = None, time_step: float = 0.25, domain_extent: float", "the spatial coordinate in a periodic domain `[0, S)`, `t`", "at the spatial mesh points rather than the corresponding Fourier", "domain_extent) ** 0.5 ) initial_state_std = rfft_coeff_to_real_array( initial_state_kernel + 1j", "the state field by its values at the spatial mesh", "** 2) * (dim_state / domain_extent) ** 0.5 ) state_noise_std", "process, `κ̃ₖ` the kth Fourier coefficient of the smoothing kernel", "2, norm=\"ortho\") ) self.integrator = FourierETDRK4Integrator( linear_operator=linear_operator, nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state, domain_size=domain_extent,", "stochastic differential equations (SDEs) in the Fourier coefficients X̃ₖ dX̃ₖ", "and an Euler-Maruyama discretisation used for the Wiener process increment.", "amplitude and `ℓ` a parameter controlling the length scale. References:", "pp. 1177–1206. 
\"\"\" from typing import Union, Optional, Sequence, Callable", "values correspond to larger magnitude values for the initial state.", "integrate the deterministic component of the SDE dynamics and an", "0 ).shape[0] super().__init__( dim_state=dim_state, dim_observation=dim_observation, initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std, observation_noise_std=observation_noise_std, **kwargs", "laminar flame fronts. This model class represents the state field", "γ * X) dt + κ ⊛ dW where `s`", "stiff systems. Journal of Computational Physics 176.2 (2002): 430-455. \"\"\"", "the state field. state_noise_length_scale: Length scale parameter for smoothed noise", "\"\"\" assert dim_state % 2 == 0, \"State dimension `dim_state`", "+ κ̃ₖ * dW̃ₖ where `W̃ₖ` is a complex-valued Wiener", "of state which is equivalent here to number of mesh", "values correspond to larger magnitude additive noise in the state", "from thermal equilibrium. Progress in Theoretical Physcs, 55 (1976) pp.", "media far from thermal equilibrium. Progress in Theoretical Physcs, 55", "to state vector) corresponding to observation points. observation_function: Function to", "if self.observation_function is None: return subsampled_states else: return self.observation_function(subsampled_states, t)", "of the state field at the spatial mesh points. Based", "from dapy.integrators.etdrk4 import FourierETDRK4Integrator from dapy.models.transforms import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft,", "of the smoothing kernel `κ`, `ωₖ = 2 * pi", "import FourierETDRK4Integrator from dapy.models.transforms import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array,", "dynamics. observation_noise_std: Standard deviation of additive Gaussian noise in observations.", "time-stepping for stiff PDEs. 
SIAM Journal on Scientific Computing 26.4", "is None: dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0] else: dim_observation = observation_function( np.zeros(dim_state)[observation_space_indices],", "Defaults to identity function in first argument. time_step: Integrator time", "subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm=\"ortho\")[ ..., self.observation_space_indices ] if self.observation_function is", "domain_extent: float = 32 * np.pi, damping_coeff: float = 1.0", "observations. Either a scalar or array of shape `(dim_observation,)`. Noise", "1.0, state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, ):", "None: dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0] else: dim_observation = observation_function( np.zeros(dim_state)[observation_space_indices], 0", "dapy.integrators.etdrk4 import FourierETDRK4Integrator from dapy.models.transforms import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff,", "in first argument. time_step: Integrator time step. domain_extent: Extent (size)", "the initial state. state_noise_amplitude: Amplitude scale parameter for additive state", "6, observation_noise_std: float = 0.5, initial_state_amplitude: float = 1.0, state_noise_amplitude:", "super().__init__( dim_state=dim_state, observation_space_indices=observation_space_indices, observation_function=observation_function, time_step=time_step, domain_extent=domain_extent, damping_coeff=damping_coeff, observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude, state_noise_amplitude=state_noise_amplitude,", "state field to compute mean of observation(s) given state(s) at", "1177–1206. 
\"\"\" from typing import Union, Optional, Sequence, Callable import", "2 * pi * k / S` the kth spatial", "* pi * k / S` the kth spatial frequency", "4th order Runge-- Kutta updates for non-linear terms [3, 4]", "number of mesh points in spatial discretization. observation_space_indices: Slice or", "fft.irfft(real_array_to_rfft_coeff(states), norm=\"ortho\")[ ..., self.observation_space_indices ] if self.observation_function is None: return", "mesh node indices (indices in to state vector) corresponding to", "= FourierETDRK4Integrator( linear_operator=linear_operator, nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state, domain_size=domain_extent, time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, ) if", "and Matthews, <NAME>. Exponential time differencing for stiff systems. Journal", "model [1, 2] which exhibits spatio-temporally chaotic dynamics. The governing", "ωₖ⁴ - γ) * X̃ₖ + (i * ωₖ /", "* dW̃ₖ where `W̃ₖ` is a complex-valued Wiener process, `κ̃ₖ`", ").shape[0] super().__init__( dim_state=dim_state, dim_observation=dim_observation, initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std, observation_noise_std=observation_noise_std, **kwargs )", "the spatial mesh points rather than the corresponding Fourier coefficients.", "import numpy as np from dapy.models.base import AbstractDiagonalGaussianModel from dapy.models.spatial", "`[0, S)`, `t` the time coordinate, `X(s, t)` the state", "to compute mean of observation(s) given state(s) at a given", "spectral spatial discretisation, this corresponds to a non-linear system of", "= 0.5, initial_state_amplitude: float = 1.0, state_noise_amplitude: float = 1.0,", "initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std, observation_noise_std=observation_noise_std, **kwargs ) def _next_state_mean(self, states: np.ndarray, t:", "Larger values correspond to larger magnitude values for the initial", "be even\" 
self.time_step = time_step self.observation_space_indices = observation_space_indices self.observation_function =", "numpy as np from dapy.models.base import AbstractDiagonalGaussianModel from dapy.models.spatial import", "the Fourier coefficients rather than values of the state field", "exponential time-differencing plus fourth-order Runge Kutta integrator. \"\"\" super().__init__( dim_state=dim_state,", "Optional, Sequence, Callable import numpy as np from dapy.models.base import", "OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array, ) class FourierLaminarFlameModel(AbstractDiagonalGaussianModel): \"\"\"Non-linear SPDE model", "to smoother fields. num_roots_of_unity_etdrk4_integrator: Number of roots of unity to", "= observation_function spatial_freqs = np.arange(dim_state // 2 + 1) *", "norm=\"ortho\")[ ..., self.observation_space_indices ] if self.observation_function is None: return subsampled_states", "SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel, ): \"\"\"Non-linear SPDE model on a", "0, \"State dimension `dim_state` must be even\" self.time_step = time_step", "spatio-temporally chaotic dynamics. The governing stochastic partial differential equation (SPDE)", "noise used to generate initial state and additive state noise", "(indices in to state vector) corresponding to observation points. observation_function:", "generate initial state and additive state noise fields. Larger values", "model [1, 2] which exhibits spatio-temporally chaotic dynamics. References: 1.", "state_noise_kernel = ( (time_step) ** 0.5 * state_noise_amplitude * np.exp(-0.5", "pi * k / S` the kth spatial frequency and", "scale. References: 1. Kuramoto and Tsuzuki. 
Persistent propagation of concentration", "damping_coeff def nonlinear_operator(v, freqs, freqs_sq): return ( -0.5j * freqs", "num_roots_of_unity_etdrk4_integrator: int = 16, ): \"\"\" Args: dim_state: Dimension of", "sequence of integers specifying spatial mesh node indices (indices in", "observation_space_indices self.observation_function = observation_function spatial_freqs = np.arange(dim_state // 2 +", "else: return self.observation_function(subsampled_states, t) class SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel, ):", "class represents the state field by its values at the", "Fourier coefficients are assumed to be κ̃ₖ = σ *", "4. Cox, <NAME>. and Matthews, <NAME>. Exponential time differencing for", "spatial frequency and `i` the imaginary unit. A Fourier-domain exponential", "parameter for initial random state field. Larger values correspond to", "dim_observation = observation_function( np.zeros(dim_state)[observation_space_indices], 0 ).shape[0] super().__init__( dim_state=dim_state, dim_observation=dim_observation, initial_state_std=initial_state_std,", "= 512, observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8), observation_function:", "domain for laminar wave fronts. Based on the Kuramato--Sivashinsky PDE", "domain_extent: Extent (size) of spatial domain. damping_coeff: Coefficient (`γ` in", "points rather than the corresponding Fourier coefficients. For more details", "than values of the state field at the spatial mesh", "for laminar flame fronts. This model class represents the state", "controlling the length scale. References: 1. Kuramoto and Tsuzuki. Persistent", "increment. The smoothing kernel Fourier coefficients are assumed to be", "scale parameter for smoothed noise used to generate initial state", "spatial_freqs[dim_state // 2] = 0 state_noise_kernel = ( (time_step) **", "Acta Astronomica, 4 (1977) pp. 1177–1206. 3. 
Kassam, Aly-Khan and", "t)` the state field process, `γ` a coefficient controlling the", "far from thermal equilibrium. Progress in Theoretical Physcs, 55 (1976)", "state field at the spatial mesh points. Based on the", "float = 1.0 / 6, observation_noise_std: float = 0.5, initial_state_amplitude:", "field to compute mean of observation(s) given state(s) at a", "Optional[Callable[[np.ndarray, int], np.ndarray]] = None, time_step: float = 0.25, domain_extent:", "55 (1976) pp. 356–369. 2. Sivashinsky. Nonlinear analysis of hydrodynamic", "None: return subsampled_states else: return self.observation_function(subsampled_states, t) class SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn,", "the state field process, `γ` a coefficient controlling the degree", "def nonlinear_operator(v, freqs, freqs_sq): return ( -0.5j * freqs *", "in each dimension assumed to be independent i.e. a diagonal", "def _next_state_mean(self, states: np.ndarray, t: int) -> np.ndarray: return rfft_coeff_to_real_array(", "spatial domain. damping_coeff: Coefficient (`γ` in description above) controlling degree", "wave fronts. Based on the Kuramato--Sivashinsky PDE model [1, 2]", "from dapy.models.spatial import SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4 import FourierETDRK4Integrator from dapy.models.transforms", "np.arange(dim_state // 2 + 1) * 2 * np.pi /", "`W̃ₖ` is a complex-valued Wiener process, `κ̃ₖ` the kth Fourier", "dapy.models.base import AbstractDiagonalGaussianModel from dapy.models.spatial import SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4 import", "the corresponding Fourier coefficients. For more details see the docstring", "typing import Union, Optional, Sequence, Callable import numpy as np", "`ωₖ = 2 * pi * k / S` the", "and Trefethen, <NAME>. Fourth-order time-stepping for stiff PDEs. SIAM Journal", "for laminar wave fronts. 
Based on the Kuramato--Sivashinsky PDE model", "its the Fourier coefficients rather than values of the state", "model on a periodic 1D spatial domain for laminar flame", "Journal on Scientific Computing 26.4 (2005): 1214-1233. 4. Cox, <NAME>.", "Nonlinear analysis of hydrodynamic instability in laminar flames I. Derivation", "X) dt + κ ⊛ dW where `s` is the", "* initial_state_kernel, False ) def linear_operator(freqs, freqs_sq): return freqs_sq -", "damping_coeff=damping_coeff, observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude, state_noise_amplitude=state_noise_amplitude, state_noise_length_scale=state_noise_length_scale, num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator, mesh_shape=(dim_state,), domain_extents=(domain_extent,), domain_is_periodic=True, observation_node_indices=observation_space_indices,", "process, `κ(s)` a spatial smoothing kernel and `⊛` indicates circular", "Aly-Khan and Trefethen, <NAME>. Fourth-order time-stepping for stiff PDEs. SIAM", "step. domain_extent: Extent (size) of spatial domain. damping_coeff: Coefficient (`γ`", "points. Based on the Kuramato--Sivashinsky PDE model [1, 2] which", "the kth Fourier coefficient of the smoothing kernel `κ`, `ωₖ", "coordinate. Using a spectral spatial discretisation, this corresponds to a", "time differencing for stiff systems. Journal of Computational Physics 176.2", "0.5 ) state_noise_std = rfft_coeff_to_real_array( state_noise_kernel + 1j * state_noise_kernel,", "Union, Optional, Sequence, Callable import numpy as np from dapy.models.base", "field. 
state_noise_length_scale: Length scale parameter for smoothed noise used to", "FourierETDRK4Integrator( linear_operator=linear_operator, nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state, domain_size=domain_extent, time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, ) if observation_function", "to subsampled state field to compute mean of observation(s) given", "num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, ) if observation_function is None: dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0] else:", "docstring of `FourierLaminarFlameModel`. \"\"\" def __init__( self, dim_state: int =", "+ 1) * 2 * np.pi / domain_extent spatial_freqs_sq =", "state(s) at a given time index. Defaults to identity function", "be κ̃ₖ = σ * exp(-ωₖ² * ℓ²) * √(M", "S` the kth spatial frequency and `i` the imaginary unit.", "a scalar or array of shape `(dim_observation,)`. Noise in each", "int = 16, **kwargs ): \"\"\" Args: dim_state: Dimension of", "FourierLaminarFlameModel, ): \"\"\"Non-linear SPDE model on a periodic 1D spatial", "Journal of Computational Physics 176.2 (2002): 430-455. \"\"\" def __init__(", "0.5 * state_noise_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale **", ") def linear_operator(freqs, freqs_sq): return freqs_sq - freqs_sq ** 2", "plus fourth-order Runge Kutta integrator. 
\"\"\" super().__init__( dim_state=dim_state, observation_space_indices=observation_space_indices, observation_function=observation_function,", "* np.pi / domain_extent spatial_freqs_sq = spatial_freqs ** 2 spatial_freqs[dim_state", "the deterministic component of the SDE dynamics and an Euler-Maruyama", "return self.observation_function(subsampled_states, t) class SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel, ): \"\"\"Non-linear", "/ domain_extent) ** 0.5 ) state_noise_std = rfft_coeff_to_real_array( state_noise_kernel +", "systems. Journal of Computational Physics 176.2 (2002): 430-455. \"\"\" def", "linear_operator=linear_operator, nonlinear_operator=nonlinear_operator, num_mesh_point=dim_state, domain_size=domain_extent, time_step=time_step, num_roots_of_unity=num_roots_of_unity_etdrk4_integrator, ) if observation_function is", "damping in the dynamics, `W(s, t)` a space-time white noise", "in to state vector) corresponding to observation points. observation_function: Function", "linear_operator(freqs, freqs_sq): return freqs_sq - freqs_sq ** 2 - damping_coeff", "Either a scalar or array of shape `(dim_observation,)`. Noise in", "initial random state field. Larger values correspond to larger magnitude", "// 2 + 1) * 2 * np.pi / domain_extent", "Tsuzuki. Persistent propagation of concentration waves in dissipative media far", "`(dim_observation,)`. Noise in each dimension assumed to be independent i.e.", "int) -> np.ndarray: subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm=\"ortho\")[ ..., self.observation_space_indices ]", "PDE model [1, 2] which exhibits spatio-temporally chaotic dynamics. References:", "real_array_to_rfft_coeff, rfft_coeff_to_real_array, ) class FourierLaminarFlameModel(AbstractDiagonalGaussianModel): \"\"\"Non-linear SPDE model on a", "dissipative media far from thermal equilibrium. 
Progress in Theoretical Physcs,", "observation_space_indices=observation_space_indices, observation_function=observation_function, time_step=time_step, domain_extent=domain_extent, damping_coeff=damping_coeff, observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude, state_noise_amplitude=state_noise_amplitude, state_noise_length_scale=state_noise_length_scale, num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator,", "t) class SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel, ): \"\"\"Non-linear SPDE model", "This model class represents the state field by its values", "dynamics and an Euler-Maruyama discretisation used for the Wiener process", "stiff PDEs. SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233.", "of additive Gaussian noise in observations. Either a scalar or", "subsampled_states else: return self.observation_function(subsampled_states, t) class SpatialLaminarFlameModel( SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel,", "coordinate, `X(s, t)` the state field process, `γ` a coefficient", "i.e. a diagonal noise covariance. initial_state_amplitude: Amplitude scale parameter for", "pp. 356–369. 2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in", "basic equations. Acta Astronomica, 4 (1977) pp. 1177–1206. 3. Kassam,", "np.pi, damping_coeff: float = 1.0 / 6, observation_noise_std: float =", "= np.zeros(dim_state)[observation_space_indices].shape[0] else: dim_observation = observation_function( np.zeros(dim_state)[observation_space_indices], 0 ).shape[0] super().__init__(", "a space-time white noise process, `κ(s)` a spatial smoothing kernel", "2 spatial_freqs[dim_state // 2] = 0 state_noise_kernel = ( (time_step)", "here to number of mesh points in spatial discretization. 
observation_space_indices:", "represents the state field by its the Fourier coefficients rather", "\"\"\"Non-linear SPDE model on a periodic 1D spatial domain for", "governing stochastic partial differential equation (SPDE) is dX = -(∂⁴X/∂s⁴", "SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4 import FourierETDRK4Integrator from dapy.models.transforms import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,", "of spatial domain. damping_coeff: Coefficient (`γ` in description above) controlling", "spatial discretisation, this corresponds to a non-linear system of stochastic", "initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std, observation_noise_std=observation_noise_std, **kwargs ) def _next_state_mean(self, states: np.ndarray,", "= np.arange(dim_state // 2 + 1) * 2 * np.pi", "which exhibits spatio-temporally chaotic dynamics. References: 1. Kuramoto and Tsuzuki.", "Union[slice, Sequence[int]] = slice(4, None, 8), observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]]", "identity function in first argument. time_step: Integrator time step. domain_extent:", "of `FourierLaminarFlameModel`. \"\"\" def __init__( self, dim_state: int = 512,", "(SPDE) is dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² + X *", "1.0 / 6, observation_noise_std: float = 0.5, initial_state_amplitude: float =", "for stiff systems. Journal of Computational Physics 176.2 (2002): 430-455.", "* X) dt + κ ⊛ dW where `s` is", "Runge Kutta integrator. \"\"\" assert dim_state % 2 == 0,", "a parameter controlling the length scale. References: 1. Kuramoto and", "noise covariance. initial_state_amplitude: Amplitude scale parameter for initial random state", "with 4th order Runge-- Kutta updates for non-linear terms [3,", "noise in model dynamics. 
Larger values correspond to larger magnitude", "space-time white noise process, `κ(s)` a spatial smoothing kernel and", "model on a periodic 1D spatial domain for laminar wave", "t: int) -> np.ndarray: subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm=\"ortho\")[ ..., self.observation_space_indices", "time coordinate, `X(s, t)` the state field process, `γ` a", "Cox, <NAME>. and Matthews, <NAME>. Exponential time differencing for stiff", "References: 1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves", "field by its the Fourier coefficients rather than values of", "Exponential time differencing for stiff systems. Journal of Computational Physics", "field by its values at the spatial mesh points rather", "function in first argument. time_step: Integrator time step. domain_extent: Extent", "= ( (time_step) ** 0.5 * state_noise_amplitude * np.exp(-0.5 *", "1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves in", "dX̃ₖ = (ωₖ² - ωₖ⁴ - γ) * X̃ₖ +", "import Union, Optional, Sequence, Callable import numpy as np from", "class FourierLaminarFlameModel(AbstractDiagonalGaussianModel): \"\"\"Non-linear SPDE model on a periodic 1D spatial", "∂X/∂s + γ * X) dt + κ ⊛ dW", "exponential time-differencing integrator with 4th order Runge-- Kutta updates for", "* np.pi, damping_coeff: float = 1.0 / 6, observation_noise_std: float", "of unity to use in approximating contour integrals in exponential", "of shape `(dim_observation,)`. Noise in each dimension assumed to be", "* state_noise_length_scale ** 2) * (dim_state / domain_extent) ** 0.5", "initial_state_kernel, False ) def linear_operator(freqs, freqs_sq): return freqs_sq - freqs_sq", "= 1.0, state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator: int = 16,", "additive noise in the state field. 
state_noise_length_scale: Length scale parameter", "0.5 ) initial_state_std = rfft_coeff_to_real_array( initial_state_kernel + 1j * initial_state_kernel,", "dynamics. The governing stochastic partial differential equation (SPDE) is dX", "is the spatial coordinate in a periodic domain `[0, S)`,", ") class FourierLaminarFlameModel(AbstractDiagonalGaussianModel): \"\"\"Non-linear SPDE model on a periodic 1D", "exponential time-differencing plus fourth-order Runge Kutta integrator. \"\"\" assert dim_state", "Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar flames I.", "this corresponds to a non-linear system of stochastic differential equations", "+ γ * X) dt + κ ⊛ dW where", "dim_state: Dimension of state which is equivalent here to number", "independent i.e. a diagonal noise covariance. initial_state_amplitude: Amplitude scale parameter", "κ̃ₖ = σ * exp(-ωₖ² * ℓ²) * √(M /", "\"\"\" Args: dim_state: Dimension of state which is equivalent here", "* (dim_state / domain_extent) ** 0.5 ) initial_state_std = rfft_coeff_to_real_array(", "= 2 * pi * k / S` the kth", "** 2) * (dim_state / domain_extent) ** 0.5 ) initial_state_std", "spatial coordinate. Using a spectral spatial discretisation, this corresponds to", "For more details see the docstring of `FourierLaminarFlameModel`. \"\"\" def", "return freqs_sq - freqs_sq ** 2 - damping_coeff def nonlinear_operator(v,", "self.observation_function is None: return subsampled_states else: return self.observation_function(subsampled_states, t) class", "differential equation (SPDE) is dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² +", "imaginary unit. A Fourier-domain exponential time-differencing integrator with 4th order", "to number of mesh points in spatial discretization. 
observation_space_indices: Slice", "observation_space_indices: Slice or sequence of integers specifying spatial mesh node", "observation_function: Function to apply to subsampled state field to compute", "controlling the degree of damping in the dynamics, `W(s, t)`", "coefficients are assumed to be κ̃ₖ = σ * exp(-ωₖ²", "np.zeros(dim_state)[observation_space_indices], 0 ).shape[0] super().__init__( dim_state=dim_state, dim_observation=dim_observation, initial_state_std=initial_state_std, initial_state_mean=np.zeros(dim_state), state_noise_std=state_noise_std, observation_noise_std=observation_noise_std,", "for non-linear terms [3, 4] is used to integrate the", "[3, 4] is used to integrate the deterministic component of", "S)`, `t` the time coordinate, `X(s, t)` the state field", "smoothing kernel `κ`, `ωₖ = 2 * pi * k", "(1977) pp. 1177–1206. 3. Kassam, Aly-Khan and Trefethen, <NAME>. Fourth-order", "num_roots_of_unity_etdrk4_integrator: int = 16, **kwargs ): \"\"\" Args: dim_state: Dimension", "and `ℓ` a parameter controlling the length scale. References: 1.", "float = 0.5, initial_state_amplitude: float = 1.0, state_noise_amplitude: float =", "waves in dissipative media far from thermal equilibrium. Progress in", "is a parameter controlling the amplitude and `ℓ` a parameter", "* np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2) * (dim_state", "argument. time_step: Integrator time step. domain_extent: Extent (size) of spatial", "field. Larger values correspond to larger magnitude values for the", "I. Derivation of basic equations. Acta Astronomica, 4 (1977) pp.", "`s` is the spatial coordinate in a periodic domain `[0,", "parameter controlling the length scale. References: 1. Kuramoto and Tsuzuki.", "fronts. 
This model class represents the state field by its", "freqs_sq): return ( -0.5j * freqs * fft.rfft(fft.irfft(v, norm=\"ortho\") **", "rfft_coeff_to_real_array( state_noise_kernel + 1j * state_noise_kernel, False ) initial_state_kernel =", "1.0, state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, **kwargs", "description above) controlling degree of damping in dynamics. observation_noise_std: Standard", "kernel `κ`, `ωₖ = 2 * pi * k /", "κ ⊛ dW where `s` is the spatial coordinate in", "time_step=time_step, domain_extent=domain_extent, damping_coeff=damping_coeff, observation_noise_std=observation_noise_std, initial_state_amplitude=initial_state_amplitude, state_noise_amplitude=state_noise_amplitude, state_noise_length_scale=state_noise_length_scale, num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator, mesh_shape=(dim_state,), domain_extents=(domain_extent,),", "observation_function spatial_freqs = np.arange(dim_state // 2 + 1) * 2", "spatial domain for laminar flame fronts. This model class represents", "⊛ dW where `s` is the spatial coordinate in a", "of basic equations. Acta Astronomica, 4 (1977) pp. 1177–1206. 3.", "equations. Acta Astronomica, 4 (1977) pp. 1177–1206. 3. Kassam, Aly-Khan", "state_noise_length_scale: float = 1.0, num_roots_of_unity_etdrk4_integrator: int = 16, **kwargs ):", "Fourth-order time-stepping for stiff PDEs. SIAM Journal on Scientific Computing", "Astronomica, 4 (1977) pp. 1177–1206. \"\"\" from typing import Union,", "(1976) pp. 356–369. 2. Sivashinsky. Nonlinear analysis of hydrodynamic instability", "σ * exp(-ωₖ² * ℓ²) * √(M / S) where", "scalar or array of shape `(dim_observation,)`. 
Noise in each dimension", "time_step self.observation_space_indices = observation_space_indices self.observation_function = observation_function spatial_freqs = np.arange(dim_state", "** 2 - damping_coeff def nonlinear_operator(v, freqs, freqs_sq): return (", "is used to integrate the deterministic component of the SDE", "(`γ` in description above) controlling degree of damping in dynamics.", "equation (SPDE) is dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² + X", "False ) initial_state_kernel = ( initial_state_amplitude * np.exp(-0.5 * spatial_freqs_sq", "instability in laminar flames I. Derivation of basic equations. Acta", "values correspond to smoother fields. num_roots_of_unity_etdrk4_integrator: Number of roots of", "in Theoretical Physcs, 55 (1976) pp. 356–369. 2. Sivashinsky. Nonlinear", "system of stochastic differential equations (SDEs) in the Fourier coefficients", "initial state. state_noise_amplitude: Amplitude scale parameter for additive state noise", "Kutta integrator. \"\"\" assert dim_state % 2 == 0, \"State", "assert dim_state % 2 == 0, \"State dimension `dim_state` must", "as np from dapy.models.base import AbstractDiagonalGaussianModel from dapy.models.spatial import SpatiallyExtendedModelMixIn", "(2005): 1214-1233. 4. Cox, <NAME>. and Matthews, <NAME>. Exponential time", "from dapy.models.transforms import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array, ) class", "Persistent propagation of concentration waves in dissipative media far from", "state_noise_kernel + 1j * state_noise_kernel, False ) initial_state_kernel = (", "Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally chaotic dynamics.", "for stiff PDEs. SIAM Journal on Scientific Computing 26.4 (2005):", "np.pi / domain_extent spatial_freqs_sq = spatial_freqs ** 2 spatial_freqs[dim_state //", "[1, 2] which exhibits spatio-temporally chaotic dynamics. References: 1. 
Kuramoto", "non-linear terms [3, 4] is used to integrate the deterministic", "..., self.observation_space_indices ] if self.observation_function is None: return subsampled_states else:", "SpatiallyExtendedModelMixIn, OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, FourierLaminarFlameModel, ): \"\"\"Non-linear SPDE model on a periodic", "2 + 1) * 2 * np.pi / domain_extent spatial_freqs_sq", "(SDEs) in the Fourier coefficients X̃ₖ dX̃ₖ = (ωₖ² -", "* fft.rfft(fft.irfft(v, norm=\"ortho\") ** 2, norm=\"ortho\") ) self.integrator = FourierETDRK4Integrator(", "which is equivalent here to number of mesh points in", "Amplitude scale parameter for additive state noise in model dynamics.", "magnitude values for the initial state. state_noise_amplitude: Amplitude scale parameter", "\"\"\" def __init__( self, dim_state: int = 512, observation_space_indices: Union[slice,", "state noise fields. Larger values correspond to smoother fields. num_roots_of_unity_etdrk4_integrator:", "an Euler-Maruyama discretisation used for the Wiener process increment. The", "k / S` the kth spatial frequency and `i` the", "3. Kassam, Aly-Khan and Trefethen, <NAME>. Fourth-order time-stepping for stiff", "dt + κ ⊛ dW where `s` is the spatial", "FourierLaminarFlameModel(AbstractDiagonalGaussianModel): \"\"\"Non-linear SPDE model on a periodic 1D spatial domain", "differential equations (SDEs) in the Fourier coefficients X̃ₖ dX̃ₖ =", "( initial_state_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)", "/ domain_extent) ** 0.5 ) initial_state_std = rfft_coeff_to_real_array( initial_state_kernel +", "2] which exhibits spatio-temporally chaotic dynamics. 
The governing stochastic partial", "state_noise_std = rfft_coeff_to_real_array( state_noise_kernel + 1j * state_noise_kernel, False )", "`t` the time coordinate, `X(s, t)` the state field process,", "process, `γ` a coefficient controlling the degree of damping in", "None, 8), observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, time_step: float", "coefficient controlling the degree of damping in the dynamics, `W(s,", "damping_coeff: Coefficient (`γ` in description above) controlling degree of damping", "to be independent i.e. a diagonal noise covariance. initial_state_amplitude: Amplitude", "of hydrodynamic instability in laminar flames I. Derivation of basic", "observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, time_step: float = 0.25,", "np from dapy.models.base import AbstractDiagonalGaussianModel from dapy.models.spatial import SpatiallyExtendedModelMixIn from", "= 16, **kwargs ): \"\"\" Args: dim_state: Dimension of state", "spatial discretization. observation_space_indices: Slice or sequence of integers specifying spatial", "spatio-temporally chaotic dynamics. References: 1. Kuramoto and Tsuzuki. Persistent propagation", "`ℓ` a parameter controlling the length scale. References: 1. Kuramoto", "): \"\"\"Non-linear SPDE model on a periodic 1D spatial domain", "430-455. \"\"\" def __init__( self, dim_state: int = 512, observation_space_indices:", "the degree of damping in the dynamics, `W(s, t)` a", "16, ): \"\"\" Args: dim_state: Dimension of state which is", "- γ) * X̃ₖ + (i * ωₖ / 2)", "= ( initial_state_amplitude * np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale **", "represents the state field by its values at the spatial", "1.0, num_roots_of_unity_etdrk4_integrator: int = 16, **kwargs ): \"\"\" Args: dim_state:", "(dim_state / domain_extent) ** 0.5 ) initial_state_std = rfft_coeff_to_real_array( initial_state_kernel", "Integrator time step. domain_extent: Extent (size) of spatial domain. 
damping_coeff:", "points. observation_function: Function to apply to subsampled state field to" ]
[ "version='0.1', description='Utiltiy functions for working with Myo Armband data', author='Lif3line',", "# use the URL to the github repo install_requires=[ 'scipy',", "setuptools import setup, find_packages setup(name='myo_helper', version='0.1', description='Utiltiy functions for working", "setup(name='myo_helper', version='0.1', description='Utiltiy functions for working with Myo Armband data',", "Armband data.\"\"\" from setuptools import setup, find_packages setup(name='myo_helper', version='0.1', description='Utiltiy", "with Myo Armband data', author='Lif3line', author_email='<EMAIL>', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/myo_helper', #", "to the github repo install_requires=[ 'scipy', 'sklearn', 'numpy' ], keywords='myo", "\"\"\"Utiltiy functions for working with Myo Armband data.\"\"\" from setuptools", "working with Myo Armband data', author='Lif3line', author_email='<EMAIL>', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/myo_helper',", "find_packages setup(name='myo_helper', version='0.1', description='Utiltiy functions for working with Myo Armband", "data.\"\"\" from setuptools import setup, find_packages setup(name='myo_helper', version='0.1', description='Utiltiy functions", "<filename>setup.py \"\"\"Utiltiy functions for working with Myo Armband data.\"\"\" from", "Armband data', author='Lif3line', author_email='<EMAIL>', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/myo_helper', # use the", "author='Lif3line', author_email='<EMAIL>', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/myo_helper', # use the URL to", "the github repo install_requires=[ 'scipy', 'sklearn', 'numpy' ], keywords='myo emg')", "packages=find_packages(), url='https://github.com/Lif3line/myo_helper', # use the URL to the github repo", "Myo Armband data', author='Lif3line', author_email='<EMAIL>', license='MIT', packages=find_packages(), 
url='https://github.com/Lif3line/myo_helper', # use", "Myo Armband data.\"\"\" from setuptools import setup, find_packages setup(name='myo_helper', version='0.1',", "functions for working with Myo Armband data', author='Lif3line', author_email='<EMAIL>', license='MIT',", "for working with Myo Armband data', author='Lif3line', author_email='<EMAIL>', license='MIT', packages=find_packages(),", "use the URL to the github repo install_requires=[ 'scipy', 'sklearn',", "functions for working with Myo Armband data.\"\"\" from setuptools import", "description='Utiltiy functions for working with Myo Armband data', author='Lif3line', author_email='<EMAIL>',", "data', author='Lif3line', author_email='<EMAIL>', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/myo_helper', # use the URL", "for working with Myo Armband data.\"\"\" from setuptools import setup,", "setup, find_packages setup(name='myo_helper', version='0.1', description='Utiltiy functions for working with Myo", "with Myo Armband data.\"\"\" from setuptools import setup, find_packages setup(name='myo_helper',", "license='MIT', packages=find_packages(), url='https://github.com/Lif3line/myo_helper', # use the URL to the github", "URL to the github repo install_requires=[ 'scipy', 'sklearn', 'numpy' ],", "working with Myo Armband data.\"\"\" from setuptools import setup, find_packages", "the URL to the github repo install_requires=[ 'scipy', 'sklearn', 'numpy'", "import setup, find_packages setup(name='myo_helper', version='0.1', description='Utiltiy functions for working with", "url='https://github.com/Lif3line/myo_helper', # use the URL to the github repo install_requires=[", "author_email='<EMAIL>', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/myo_helper', # use the URL to the", "from setuptools import setup, find_packages setup(name='myo_helper', version='0.1', description='Utiltiy functions for" ]
[ "\"age\": i + 10} for i in range(size)] result =", "= { \"/users\": UsersHandler, \"/user-info\": UserInfoHandler } settings = {", "-*-encoding:utf-8-*- import os from karlooper.web.application import Application from karlooper.web.request import", "\"name_%d\" % i, \"gender\": \"male\", \"age\": i + 10} for", "10} for i in range(size)] result = { \"status\": 0,", "karlooper.web.request import Request class UsersHandler(Request): def get(self): return self.render(\"/user-page.html\") class", "\"/users\": UsersHandler, \"/user-info\": UserInfoHandler } settings = { \"template\": os.getcwd()", "# -*-encoding:utf-8-*- import os from karlooper.web.application import Application from karlooper.web.request", "UsersHandler(Request): def get(self): return self.render(\"/user-page.html\") class UserInfoHandler(Request): def post(self): print(self.get_http_request_message())", "} if __name__ == '__main__': application = Application(url_mapping, settings=settings) application.listen(port=8080)", "user_list } return self.response_as_json(result) url_mapping = { \"/users\": UsersHandler, \"/user-info\":", "[{\"name\": \"name_%d\" % i, \"gender\": \"male\", \"age\": i + 10}", "% i, \"gender\": \"male\", \"age\": i + 10} for i", "{ \"/users\": UsersHandler, \"/user-info\": UserInfoHandler } settings = { \"template\":", "UserInfoHandler } settings = { \"template\": os.getcwd() + \"/templates\", \"static\":", "\"/templates\", \"log_enable\": False, \"debug\": True } if __name__ == '__main__':", "int(size) user_list = [{\"name\": \"name_%d\" % i, \"gender\": \"male\", \"age\":", "UsersHandler, \"/user-info\": UserInfoHandler } settings = { \"template\": os.getcwd() +", "False, \"debug\": True } if __name__ == '__main__': application =", "= { \"status\": 0, \"message\": \"OK\", \"data\": user_list } return", "{ \"template\": os.getcwd() + \"/templates\", \"static\": os.getcwd() + \"/templates\", \"log_enable\":", "\"gender\": \"male\", \"age\": i + 10} for i in range(size)]", "= 
int(size) user_list = [{\"name\": \"name_%d\" % i, \"gender\": \"male\",", "\"template\": os.getcwd() + \"/templates\", \"static\": os.getcwd() + \"/templates\", \"log_enable\": False,", "size = self.get_parameter(\"user_size\", 0) size = int(size) user_list = [{\"name\":", "\"debug\": True } if __name__ == '__main__': application = Application(url_mapping,", "\"/templates\", \"static\": os.getcwd() + \"/templates\", \"log_enable\": False, \"debug\": True }", "\"message\": \"OK\", \"data\": user_list } return self.response_as_json(result) url_mapping = {", "url_mapping = { \"/users\": UsersHandler, \"/user-info\": UserInfoHandler } settings =", "result = { \"status\": 0, \"message\": \"OK\", \"data\": user_list }", "= { \"template\": os.getcwd() + \"/templates\", \"static\": os.getcwd() + \"/templates\",", "from karlooper.web.request import Request class UsersHandler(Request): def get(self): return self.render(\"/user-page.html\")", "post(self): print(self.get_http_request_message()) size = self.get_parameter(\"user_size\", 0) size = int(size) user_list", "\"status\": 0, \"message\": \"OK\", \"data\": user_list } return self.response_as_json(result) url_mapping", "def post(self): print(self.get_http_request_message()) size = self.get_parameter(\"user_size\", 0) size = int(size)", "\"OK\", \"data\": user_list } return self.response_as_json(result) url_mapping = { \"/users\":", "\"log_enable\": False, \"debug\": True } if __name__ == '__main__': application", "get(self): return self.render(\"/user-page.html\") class UserInfoHandler(Request): def post(self): print(self.get_http_request_message()) size =", "Application from karlooper.web.request import Request class UsersHandler(Request): def get(self): return", "os.getcwd() + \"/templates\", \"static\": os.getcwd() + \"/templates\", \"log_enable\": False, \"debug\":", "+ \"/templates\", \"static\": os.getcwd() + \"/templates\", \"log_enable\": False, \"debug\": True", "0) size = int(size) user_list = [{\"name\": 
\"name_%d\" % i,", "if __name__ == '__main__': application = Application(url_mapping, settings=settings) application.listen(port=8080) application.run()", "for i in range(size)] result = { \"status\": 0, \"message\":", "self.render(\"/user-page.html\") class UserInfoHandler(Request): def post(self): print(self.get_http_request_message()) size = self.get_parameter(\"user_size\", 0)", "self.response_as_json(result) url_mapping = { \"/users\": UsersHandler, \"/user-info\": UserInfoHandler } settings", "os from karlooper.web.application import Application from karlooper.web.request import Request class", "0, \"message\": \"OK\", \"data\": user_list } return self.response_as_json(result) url_mapping =", "def get(self): return self.render(\"/user-page.html\") class UserInfoHandler(Request): def post(self): print(self.get_http_request_message()) size", "os.getcwd() + \"/templates\", \"log_enable\": False, \"debug\": True } if __name__", "in range(size)] result = { \"status\": 0, \"message\": \"OK\", \"data\":", "karlooper.web.application import Application from karlooper.web.request import Request class UsersHandler(Request): def", "\"static\": os.getcwd() + \"/templates\", \"log_enable\": False, \"debug\": True } if", "= [{\"name\": \"name_%d\" % i, \"gender\": \"male\", \"age\": i +", "class UsersHandler(Request): def get(self): return self.render(\"/user-page.html\") class UserInfoHandler(Request): def post(self):", "user_list = [{\"name\": \"name_%d\" % i, \"gender\": \"male\", \"age\": i", "\"/user-info\": UserInfoHandler } settings = { \"template\": os.getcwd() + \"/templates\",", "\"data\": user_list } return self.response_as_json(result) url_mapping = { \"/users\": UsersHandler,", "settings = { \"template\": os.getcwd() + \"/templates\", \"static\": os.getcwd() +", "+ 10} for i in range(size)] result = { \"status\":", "= self.get_parameter(\"user_size\", 0) size = int(size) user_list = [{\"name\": \"name_%d\"", "class UserInfoHandler(Request): def post(self): 
print(self.get_http_request_message()) size = self.get_parameter(\"user_size\", 0) size", "return self.render(\"/user-page.html\") class UserInfoHandler(Request): def post(self): print(self.get_http_request_message()) size = self.get_parameter(\"user_size\",", "size = int(size) user_list = [{\"name\": \"name_%d\" % i, \"gender\":", "range(size)] result = { \"status\": 0, \"message\": \"OK\", \"data\": user_list", "import Request class UsersHandler(Request): def get(self): return self.render(\"/user-page.html\") class UserInfoHandler(Request):", "i, \"gender\": \"male\", \"age\": i + 10} for i in", "return self.response_as_json(result) url_mapping = { \"/users\": UsersHandler, \"/user-info\": UserInfoHandler }", "import Application from karlooper.web.request import Request class UsersHandler(Request): def get(self):", "True } if __name__ == '__main__': application = Application(url_mapping, settings=settings)", "print(self.get_http_request_message()) size = self.get_parameter(\"user_size\", 0) size = int(size) user_list =", "UserInfoHandler(Request): def post(self): print(self.get_http_request_message()) size = self.get_parameter(\"user_size\", 0) size =", "import os from karlooper.web.application import Application from karlooper.web.request import Request", "Request class UsersHandler(Request): def get(self): return self.render(\"/user-page.html\") class UserInfoHandler(Request): def", "} return self.response_as_json(result) url_mapping = { \"/users\": UsersHandler, \"/user-info\": UserInfoHandler", "+ \"/templates\", \"log_enable\": False, \"debug\": True } if __name__ ==", "} settings = { \"template\": os.getcwd() + \"/templates\", \"static\": os.getcwd()", "from karlooper.web.application import Application from karlooper.web.request import Request class UsersHandler(Request):", "{ \"status\": 0, \"message\": \"OK\", \"data\": user_list } return self.response_as_json(result)", "self.get_parameter(\"user_size\", 0) size = int(size) user_list = [{\"name\": 
\"name_%d\" %", "i + 10} for i in range(size)] result = {", "i in range(size)] result = { \"status\": 0, \"message\": \"OK\",", "\"male\", \"age\": i + 10} for i in range(size)] result" ]
[ "\"\"\" rand_end = max(0, len(frame_indices) - self.size - 1) begin_index", "class TemporalCenterRandomCrop(object): \"\"\"Temporally crop the given frame indices at a", "i.e. if 120 and 90: = 30 offset = random.randint(-1*int(spacing/2)", "center_index = len(frame_indices) // 2 begin_index = max(0, center_index -", "size): self.size = size def __call__(self, frame_indices): \"\"\" Args: frame_indices", "many times as necessary to satisfy the size. Args: size", "+ self.size, len(frame_indices)) out = frame_indices[begin_index:end_index] for index in out:", "out class TemporalCenterRandomCrop(object): \"\"\"Temporally crop the given frame indices at", "the size, loop the indices as many times as necessary", "len(frame_indices) - self.size - 1) begin_index = random.randint(0, rand_end) end_index", "__call__(self, frame_indices): out = frame_indices[slice(self.begin, self.end+1, self.step)] return out class", "class TemporalCenterCropFlexible(object): def __init__(self, begin=15, step=3, end=108): self.begin = begin", "return out class TemporalCenterCrop(object): \"\"\"Temporally crop the given frame indices", "Args: frame_indices (list): frame indices to be cropped. Returns: list:", "frame_indices): out = frame_indices[slice(self.begin, self.end+1, self.step)] return out class TemporalCenterRandomCrop(object):", "as necessary to satisfy the size. Args: size (int): Desired", "int((len(frame_indices) - self.size)/2) # i.e. if 120 and 90: =", "1) # i.e if 120 and 90, -14 to 14", "int(spacing/2) - 1) # i.e if 120 and 90, -14", "a random location. If the number of frames is less", "len(out) >= self.size: break out.append(index) return out class TemporalRandomCrop(object): \"\"\"Temporally", "Returns: list: Cropped frame indices. \"\"\" center_index = len(frame_indices) //", "be cropped. Returns: list: Cropped frame indices. 
\"\"\" spacing =", "frame_indices): out = frame_indices[:self.size] for index in out: if len(out)", "(self.size // 2)) end_index = min(begin_index + self.size, len(frame_indices)) out", "= frame_indices[slice(self.begin, self.end+1, self.step)] return out class TemporalCenterRandomCrop(object): \"\"\"Temporally crop", "size): self.size = size def __call__(self, frame_indices): out = frame_indices[:self.size]", "= frame_indices for index in out: if len(out) >= self.size:", "indices. \"\"\" rand_end = max(0, len(frame_indices) - self.size - 1)", "Cropped frame indices. \"\"\" spacing = int((len(frame_indices) - self.size)/2) #", "out class TemporalCenterCropFlexible(object): def __init__(self, begin=15, step=3, end=108): self.begin =", "if len(out) >= self.size: break out.append(index) return out class TemporalCenterCropFlexible(object):", "out.append(index) return out class TemporalRandomCrop(object): \"\"\"Temporally crop the given frame", "int(self.size/2) + offset # i.e. 120: 60 - 45 +", "to be cropped. Returns: list: Cropped frame indices. \"\"\" center_index", "TemporalRandomCrop(object): \"\"\"Temporally crop the given frame indices at a random", "(end - begin) / step + 1 == 32 def", "= size def __call__(self, frame_indices): out = frame_indices[:self.size] for index", "\"\"\"Temporally crop the given frame indices at a random location.", "120: 60 - 45 + offset (-1 to 29) end_index", "len(out) >= self.size: break out.append(index) return out class TemporalBeginCrop(object): \"\"\"Temporally", "= begin_index + self.size out = frame_indices[begin_index:end_index] for index in", "given frame indices at a center. If the number of", "size. 
Args: size (int): Desired output size of the crop.", "size def __call__(self, frame_indices): \"\"\" Args: frame_indices (list): frame indices", "- self.size - 1) begin_index = random.randint(0, rand_end) end_index =", "class LoopPadding(object): def __init__(self, size): self.size = size def __call__(self,", "out = frame_indices[slice(self.begin, self.end+1, self.step)] return out class TemporalCenterRandomCrop(object): \"\"\"Temporally", "indices. \"\"\" center_index = len(frame_indices) // 2 begin_index = max(0,", "the given frame indices at a random location. If the", "frame indices at a center. If the number of frames", "out class TemporalCenterCrop(object): \"\"\"Temporally crop the given frame indices at", "(list): frame indices to be cropped. Returns: list: Cropped frame", "= step self.end = end assert (end - begin) /", "to be cropped. Returns: list: Cropped frame indices. \"\"\" spacing", "if 120 and 90: = 30 offset = random.randint(-1*int(spacing/2) +", "+ self.size out = frame_indices[begin_index:end_index] for index in out: if", "assert (end - begin) / step + 1 == 32", "- 1) begin_index = random.randint(0, rand_end) end_index = min(begin_index +", "class TemporalBeginCrop(object): \"\"\"Temporally crop the given frame indices at a", "math class LoopPadding(object): def __init__(self, size): self.size = size def", "crop the given frame indices at a random location. If", "frame indices. 
\"\"\" center_index = len(frame_indices) // 2 begin_index =", "frame_indices): \"\"\" Args: frame_indices (list): frame indices to be cropped.", "LoopPadding(object): def __init__(self, size): self.size = size def __call__(self, frame_indices):", "break out.append(index) return out class TemporalCenterCrop(object): \"\"\"Temporally crop the given", "= len(frame_indices) // 2 begin_index = max(0, center_index - (self.size", "// 2 begin_index = max(0, center_index - (self.size // 2))", "+ 1 == 32 def __call__(self, frame_indices): out = frame_indices[slice(self.begin,", "location. If the number of frames is less than the", "offset (-1 to 29) end_index = begin_index + self.size out", "frame_indices): out = frame_indices for index in out: if len(out)", "# i.e. if 120 and 90: = 30 offset =", "# i.e. 120: 60 - 45 + offset (-1 to", "out class TemporalBeginCrop(object): \"\"\"Temporally crop the given frame indices at", "is less than the size, loop the indices as many", "= random.randint(-1*int(spacing/2) + 1, int(spacing/2) - 1) # i.e if", "class TemporalCenterCrop(object): \"\"\"Temporally crop the given frame indices at a", "output size of the crop. \"\"\" def __init__(self, size): self.size", "random import math class LoopPadding(object): def __init__(self, size): self.size =", "__init__(self, size): self.size = size def __call__(self, frame_indices): \"\"\" Args:", "self.size: break out.append(index) return out class TemporalRandomCrop(object): \"\"\"Temporally crop the", "out = frame_indices[:self.size] for index in out: if len(out) >=", "__init__(self, begin=15, step=3, end=108): self.begin = begin self.step = step", "out.append(index) return out class TemporalBeginCrop(object): \"\"\"Temporally crop the given frame", "indices at a center. If the number of frames is", "/ step + 1 == 32 def __call__(self, frame_indices): out", "= int(len(frame_indices)/2) - int(self.size/2) + offset # i.e. 
120: 60", "step=3, end=108): self.begin = begin self.step = step self.end =", "Args: size (int): Desired output size of the crop. \"\"\"", "if len(out) >= self.size: break out.append(index) return out class TemporalCenterCrop(object):", "Desired output size of the crop. \"\"\" def __init__(self, size):", "1) begin_index = random.randint(0, rand_end) end_index = min(begin_index + self.size,", "if 120 and 90, -14 to 14 begin_index = int(len(frame_indices)/2)", "and 90: = 30 offset = random.randint(-1*int(spacing/2) + 1, int(spacing/2)", "Returns: list: Cropped frame indices. \"\"\" rand_end = max(0, len(frame_indices)", "step + 1 == 32 def __call__(self, frame_indices): out =", "frame_indices[begin_index:end_index] for index in out: if len(out) >= self.size: break", "of frames is less than the size, loop the indices", "\"\"\" def __init__(self, size): self.size = size def __call__(self, frame_indices):", "1 == 32 def __call__(self, frame_indices): out = frame_indices[slice(self.begin, self.end+1,", "a center. If the number of frames is less than", "= 30 offset = random.randint(-1*int(spacing/2) + 1, int(spacing/2) - 1)", "begin_index = int(len(frame_indices)/2) - int(self.size/2) + offset # i.e. 120:", "crop the given frame indices at a beginning. If the", "def __init__(self, begin=15, step=3, end=108): self.begin = begin self.step =", "45 + offset (-1 to 29) end_index = begin_index +", "index in out: if len(out) >= self.size: break out.append(index) return", "beginning. If the number of frames is less than the", "i.e if 120 and 90, -14 to 14 begin_index =", "return out class TemporalCenterCropFlexible(object): def __init__(self, begin=15, step=3, end=108): self.begin", "return out class TemporalRandomCrop(object): \"\"\"Temporally crop the given frame indices", "Returns: list: Cropped frame indices. \"\"\" spacing = int((len(frame_indices) -", "of the crop. \"\"\" def __init__(self, size): self.size = size", "at a random location. 
If the number of frames is", "- begin) / step + 1 == 32 def __call__(self,", "1, int(spacing/2) - 1) # i.e if 120 and 90,", "break out.append(index) return out class TemporalRandomCrop(object): \"\"\"Temporally crop the given", "self.size: break out.append(index) return out class TemporalBeginCrop(object): \"\"\"Temporally crop the", "90, -14 to 14 begin_index = int(len(frame_indices)/2) - int(self.size/2) +", "= frame_indices[begin_index:end_index] for index in out: if len(out) >= self.size:", "if len(out) >= self.size: break out.append(index) return out class TemporalRandomCrop(object):", "begin_index = max(0, center_index - (self.size // 2)) end_index =", "size, loop the indices as many times as necessary to", "len(frame_indices) // 2 begin_index = max(0, center_index - (self.size //", "def __call__(self, frame_indices): out = frame_indices[:self.size] for index in out:", "loop the indices as many times as necessary to satisfy", "to 14 begin_index = int(len(frame_indices)/2) - int(self.size/2) + offset #", "\"\"\" spacing = int((len(frame_indices) - self.size)/2) # i.e. if 120", "self.size = size def __call__(self, frame_indices): out = frame_indices for", "less than the size, loop the indices as many times", "== 32 def __call__(self, frame_indices): out = frame_indices[slice(self.begin, self.end+1, self.step)]", "def __init__(self, size): self.size = size def __call__(self, frame_indices): out", "the number of frames is less than the size, loop", "at a beginning. If the number of frames is less", "- (self.size // 2)) end_index = min(begin_index + self.size, len(frame_indices))", "indices at a random location. If the number of frames", "32 def __call__(self, frame_indices): out = frame_indices[slice(self.begin, self.end+1, self.step)] return", "to be cropped. Returns: list: Cropped frame indices. \"\"\" rand_end", "to satisfy the size. 
Args: size (int): Desired output size", "number of frames is less than the size, loop the", "len(out) >= self.size: break out.append(index) return out class TemporalCenterCropFlexible(object): def", "120 and 90: = 30 offset = random.randint(-1*int(spacing/2) + 1,", "14 begin_index = int(len(frame_indices)/2) - int(self.size/2) + offset # i.e.", "step self.end = end assert (end - begin) / step", "2 begin_index = max(0, center_index - (self.size // 2)) end_index", "def __call__(self, frame_indices): \"\"\" Args: frame_indices (list): frame indices to", "+ 1, int(spacing/2) - 1) # i.e if 120 and", "crop. \"\"\" def __init__(self, size): self.size = size def __call__(self,", "= max(0, center_index - (self.size // 2)) end_index = min(begin_index", "be cropped. Returns: list: Cropped frame indices. \"\"\" rand_end =", "= random.randint(0, rand_end) end_index = min(begin_index + self.size, len(frame_indices)) out", "out.append(index) return out class TemporalCenterCropFlexible(object): def __init__(self, begin=15, step=3, end=108):", "Cropped frame indices. \"\"\" center_index = len(frame_indices) // 2 begin_index", "= frame_indices[:self.size] for index in out: if len(out) >= self.size:", "self.end+1, self.step)] return out class TemporalCenterRandomCrop(object): \"\"\"Temporally crop the given", "indices at a beginning. If the number of frames is", "90: = 30 offset = random.randint(-1*int(spacing/2) + 1, int(spacing/2) -", "to 29) end_index = begin_index + self.size out = frame_indices[begin_index:end_index]", "import random import math class LoopPadding(object): def __init__(self, size): self.size", "frame indices at a random location. If the number of", "as many times as necessary to satisfy the size. Args:", "out = frame_indices for index in out: if len(out) >=", "max(0, len(frame_indices) - self.size - 1) begin_index = random.randint(0, rand_end)", "- int(self.size/2) + offset # i.e. 120: 60 - 45", "(int): Desired output size of the crop. 
\"\"\" def __init__(self,", "- 1) # i.e if 120 and 90, -14 to", "rand_end = max(0, len(frame_indices) - self.size - 1) begin_index =", "TemporalCenterCrop(object): \"\"\"Temporally crop the given frame indices at a center.", "return out class TemporalBeginCrop(object): \"\"\"Temporally crop the given frame indices", "end_index = min(begin_index + self.size, len(frame_indices)) out = frame_indices[begin_index:end_index] for", "frame_indices for index in out: if len(out) >= self.size: break", "given frame indices at a random location. If the number", "- 45 + offset (-1 to 29) end_index = begin_index", "than the size, loop the indices as many times as", "+ offset (-1 to 29) end_index = begin_index + self.size", "def __call__(self, frame_indices): out = frame_indices for index in out:", "the indices as many times as necessary to satisfy the", "out.append(index) return out class TemporalCenterCrop(object): \"\"\"Temporally crop the given frame", "30 offset = random.randint(-1*int(spacing/2) + 1, int(spacing/2) - 1) #", "max(0, center_index - (self.size // 2)) end_index = min(begin_index +", "import math class LoopPadding(object): def __init__(self, size): self.size = size", "size): self.size = size def __call__(self, frame_indices): out = frame_indices", "2)) end_index = min(begin_index + self.size, len(frame_indices)) out = frame_indices[begin_index:end_index]", "size def __call__(self, frame_indices): out = frame_indices for index in", "random.randint(-1*int(spacing/2) + 1, int(spacing/2) - 1) # i.e if 120", "offset # i.e. 120: 60 - 45 + offset (-1", "self.size out = frame_indices[begin_index:end_index] for index in out: if len(out)", "satisfy the size. Args: size (int): Desired output size of", "self.size: break out.append(index) return out class TemporalCenterCrop(object): \"\"\"Temporally crop the", "120 and 90, -14 to 14 begin_index = int(len(frame_indices)/2) -", "-14 to 14 begin_index = int(len(frame_indices)/2) - int(self.size/2) + offset", "cropped. 
Returns: list: Cropped frame indices. \"\"\" spacing = int((len(frame_indices)", "cropped. Returns: list: Cropped frame indices. \"\"\" center_index = len(frame_indices)", "size of the crop. \"\"\" def __init__(self, size): self.size =", "Cropped frame indices. \"\"\" rand_end = max(0, len(frame_indices) - self.size", "if len(out) >= self.size: break out.append(index) return out class TemporalBeginCrop(object):", ">= self.size: break out.append(index) return out class TemporalCenterCropFlexible(object): def __init__(self,", "self.begin = begin self.step = step self.end = end assert", "len(out) >= self.size: break out.append(index) return out class TemporalCenterCrop(object): \"\"\"Temporally", "necessary to satisfy the size. Args: size (int): Desired output", "list: Cropped frame indices. \"\"\" center_index = len(frame_indices) // 2", "break out.append(index) return out class TemporalBeginCrop(object): \"\"\"Temporally crop the given", "__init__(self, size): self.size = size def __call__(self, frame_indices): out =", "the size. Args: size (int): Desired output size of the", "= begin self.step = step self.end = end assert (end", "list: Cropped frame indices. \"\"\" rand_end = max(0, len(frame_indices) -", "begin=15, step=3, end=108): self.begin = begin self.step = step self.end", "end=108): self.begin = begin self.step = step self.end = end", "self.step = step self.end = end assert (end - begin)", "at a center. If the number of frames is less", "frame indices at a beginning. If the number of frames", ">= self.size: break out.append(index) return out class TemporalCenterCrop(object): \"\"\"Temporally crop", "center. If the number of frames is less than the", "begin self.step = step self.end = end assert (end -", "TemporalCenterRandomCrop(object): \"\"\"Temporally crop the given frame indices at a random", "a beginning. If the number of frames is less than", "frame indices. 
\"\"\" rand_end = max(0, len(frame_indices) - self.size -", "29) end_index = begin_index + self.size out = frame_indices[begin_index:end_index] for", "= size def __call__(self, frame_indices): out = frame_indices for index", "self.size: break out.append(index) return out class TemporalCenterCropFlexible(object): def __init__(self, begin=15,", "__call__(self, frame_indices): \"\"\" Args: frame_indices (list): frame indices to be", "end_index = begin_index + self.size out = frame_indices[begin_index:end_index] for index", "# i.e if 120 and 90, -14 to 14 begin_index", "= int((len(frame_indices) - self.size)/2) # i.e. if 120 and 90:", "indices. \"\"\" spacing = int((len(frame_indices) - self.size)/2) # i.e. if", "i.e. 120: 60 - 45 + offset (-1 to 29)", "= max(0, len(frame_indices) - self.size - 1) begin_index = random.randint(0,", "rand_end) end_index = min(begin_index + self.size, len(frame_indices)) out = frame_indices[begin_index:end_index]", "self.size = size def __call__(self, frame_indices): out = frame_indices[:self.size] for", "= size def __call__(self, frame_indices): \"\"\" Args: frame_indices (list): frame", "out: if len(out) >= self.size: break out.append(index) return out class", "break out.append(index) return out class TemporalCenterCropFlexible(object): def __init__(self, begin=15, step=3,", "def __init__(self, size): self.size = size def __call__(self, frame_indices): \"\"\"", "len(frame_indices)) out = frame_indices[begin_index:end_index] for index in out: if len(out)", "= end assert (end - begin) / step + 1", "int(len(frame_indices)/2) - int(self.size/2) + offset # i.e. 120: 60 -", "TemporalBeginCrop(object): \"\"\"Temporally crop the given frame indices at a beginning.", "out class TemporalRandomCrop(object): \"\"\"Temporally crop the given frame indices at", "class TemporalRandomCrop(object): \"\"\"Temporally crop the given frame indices at a", "60 - 45 + offset (-1 to 29) end_index =", "the given frame indices at a center. 
If the number", "cropped. Returns: list: Cropped frame indices. \"\"\" rand_end = max(0,", "frame_indices (list): frame indices to be cropped. Returns: list: Cropped", "end assert (end - begin) / step + 1 ==", "offset = random.randint(-1*int(spacing/2) + 1, int(spacing/2) - 1) # i.e", "frame indices. \"\"\" spacing = int((len(frame_indices) - self.size)/2) # i.e.", "\"\"\"Temporally crop the given frame indices at a beginning. If", "be cropped. Returns: list: Cropped frame indices. \"\"\" center_index =", "random location. If the number of frames is less than", "// 2)) end_index = min(begin_index + self.size, len(frame_indices)) out =", "size def __call__(self, frame_indices): out = frame_indices[:self.size] for index in", "times as necessary to satisfy the size. Args: size (int):", "\"\"\" center_index = len(frame_indices) // 2 begin_index = max(0, center_index", "= min(begin_index + self.size, len(frame_indices)) out = frame_indices[begin_index:end_index] for index", "out = frame_indices[begin_index:end_index] for index in out: if len(out) >=", "+ offset # i.e. 120: 60 - 45 + offset", "begin_index + self.size out = frame_indices[begin_index:end_index] for index in out:", "indices as many times as necessary to satisfy the size.", "frame_indices[:self.size] for index in out: if len(out) >= self.size: break", "size (int): Desired output size of the crop. \"\"\" def", "def __call__(self, frame_indices): out = frame_indices[slice(self.begin, self.end+1, self.step)] return out", "for index in out: if len(out) >= self.size: break out.append(index)", "If the number of frames is less than the size,", "the crop. \"\"\" def __init__(self, size): self.size = size def", "\"\"\"Temporally crop the given frame indices at a center. If", "self.size = size def __call__(self, frame_indices): \"\"\" Args: frame_indices (list):", "\"\"\" Args: frame_indices (list): frame indices to be cropped. 
Returns:", "TemporalCenterCropFlexible(object): def __init__(self, begin=15, step=3, end=108): self.begin = begin self.step", "self.end = end assert (end - begin) / step +", "frame_indices[slice(self.begin, self.end+1, self.step)] return out class TemporalCenterRandomCrop(object): \"\"\"Temporally crop the", "self.step)] return out class TemporalCenterRandomCrop(object): \"\"\"Temporally crop the given frame", "__call__(self, frame_indices): out = frame_indices for index in out: if", "return out class TemporalCenterRandomCrop(object): \"\"\"Temporally crop the given frame indices", "- self.size)/2) # i.e. if 120 and 90: = 30", "self.size, len(frame_indices)) out = frame_indices[begin_index:end_index] for index in out: if", "indices to be cropped. Returns: list: Cropped frame indices. \"\"\"", "and 90, -14 to 14 begin_index = int(len(frame_indices)/2) - int(self.size/2)", "center_index - (self.size // 2)) end_index = min(begin_index + self.size,", "in out: if len(out) >= self.size: break out.append(index) return out", ">= self.size: break out.append(index) return out class TemporalBeginCrop(object): \"\"\"Temporally crop", "begin_index = random.randint(0, rand_end) end_index = min(begin_index + self.size, len(frame_indices))", "frame indices to be cropped. Returns: list: Cropped frame indices.", "given frame indices at a beginning. If the number of", "crop the given frame indices at a center. If the", "self.size)/2) # i.e. if 120 and 90: = 30 offset", "the given frame indices at a beginning. If the number", "__call__(self, frame_indices): out = frame_indices[:self.size] for index in out: if", "self.size - 1) begin_index = random.randint(0, rand_end) end_index = min(begin_index", "spacing = int((len(frame_indices) - self.size)/2) # i.e. if 120 and", "(-1 to 29) end_index = begin_index + self.size out =", "list: Cropped frame indices. 
\"\"\" spacing = int((len(frame_indices) - self.size)/2)", "frames is less than the size, loop the indices as", "min(begin_index + self.size, len(frame_indices)) out = frame_indices[begin_index:end_index] for index in", "random.randint(0, rand_end) end_index = min(begin_index + self.size, len(frame_indices)) out =", "begin) / step + 1 == 32 def __call__(self, frame_indices):", ">= self.size: break out.append(index) return out class TemporalRandomCrop(object): \"\"\"Temporally crop" ]
[ "process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs) return 0 if success else", "help='kill by service id instead of token', dest='is-service-id', action='store_true') parser.add_argument('--timeout',", "_, __): \"\"\"Kills the service(s) using the given token name.\"\"\"", "register(add_parser): \"\"\"Adds this sub-command's parser and returns the action function\"\"\"", "id instead of token', dest='is-service-id', action='store_true') parser.add_argument('--timeout', '-t', help='timeout (in", "= args.get('is-service-id', False) force_flag = args.get('force', False) timeout_secs = args['timeout']", "of token', dest='is-service-id', action='store_true') parser.add_argument('--timeout', '-t', help='timeout (in seconds) for", "timeout_secs = args['timeout'] success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs)", "this sub-command's parser and returns the action function\"\"\" parser =", "using the given token name.\"\"\" guard_no_cluster(clusters) token_name_or_service_id = args.get('token-or-service-id') is_service_id", "waiter.util import guard_no_cluster, check_positive def kill(clusters, args, _, __): \"\"\"Kills", "from waiter.action import process_kill_request from waiter.util import guard_no_cluster, check_positive def", "by service id instead of token', dest='is-service-id', action='store_true') parser.add_argument('--timeout', '-t',", "force_flag, timeout_secs) return 0 if success else 1 def register(add_parser):", "= args.get('token-or-service-id') is_service_id = args.get('is-service-id', False) force_flag = args.get('force', False)", "__): \"\"\"Kills the service(s) using the given token name.\"\"\" guard_no_cluster(clusters)", "process_kill_request from waiter.util import guard_no_cluster, check_positive def kill(clusters, args, _,", "args['timeout'] success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, 
timeout_secs) return 0", "parser = add_parser('kill', help='kill services') parser.add_argument('token-or-service-id') parser.add_argument('--force', '-f', help='kill all", "def kill(clusters, args, _, __): \"\"\"Kills the service(s) using the", "= args.get('force', False) timeout_secs = args['timeout'] success = process_kill_request(clusters, token_name_or_service_id,", "False) force_flag = args.get('force', False) timeout_secs = args['timeout'] success =", "and returns the action function\"\"\" parser = add_parser('kill', help='kill services')", "parser and returns the action function\"\"\" parser = add_parser('kill', help='kill", "is_service_id = args.get('is-service-id', False) force_flag = args.get('force', False) timeout_secs =", "check_positive def kill(clusters, args, _, __): \"\"\"Kills the service(s) using", "= args['timeout'] success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs) return", "kill(clusters, args, _, __): \"\"\"Kills the service(s) using the given", "guard_no_cluster(clusters) token_name_or_service_id = args.get('token-or-service-id') is_service_id = args.get('is-service-id', False) force_flag =", "all services, never prompt', dest='force', action='store_true') parser.add_argument('--service-id', '-s', help='kill by", "service(s) using the given token name.\"\"\" guard_no_cluster(clusters) token_name_or_service_id = args.get('token-or-service-id')", "token_name_or_service_id, is_service_id, force_flag, timeout_secs) return 0 if success else 1", "(in seconds) for kill to complete', type=check_positive, default=30) return kill", "add_parser('kill', help='kill services') parser.add_argument('token-or-service-id') parser.add_argument('--force', '-f', help='kill all services, never", "'-s', help='kill by service id instead of token', dest='is-service-id', action='store_true')", "returns the action function\"\"\" parser = add_parser('kill', help='kill services') 
parser.add_argument('token-or-service-id')", "waiter.action import process_kill_request from waiter.util import guard_no_cluster, check_positive def kill(clusters,", "token', dest='is-service-id', action='store_true') parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill", "parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to complete', type=check_positive,", "dest='is-service-id', action='store_true') parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to", "action='store_true') parser.add_argument('--service-id', '-s', help='kill by service id instead of token',", "never prompt', dest='force', action='store_true') parser.add_argument('--service-id', '-s', help='kill by service id", "from waiter.util import guard_no_cluster, check_positive def kill(clusters, args, _, __):", "the given token name.\"\"\" guard_no_cluster(clusters) token_name_or_service_id = args.get('token-or-service-id') is_service_id =", "action='store_true') parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to complete',", "force_flag = args.get('force', False) timeout_secs = args['timeout'] success = process_kill_request(clusters,", "prompt', dest='force', action='store_true') parser.add_argument('--service-id', '-s', help='kill by service id instead", "= process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs) return 0 if success", "success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs) return 0 if", "name.\"\"\" guard_no_cluster(clusters) token_name_or_service_id = args.get('token-or-service-id') is_service_id = args.get('is-service-id', False) force_flag", "args, _, __): \"\"\"Kills the service(s) using the given token", "import process_kill_request from waiter.util import guard_no_cluster, check_positive def kill(clusters, args,", "success else 1 def register(add_parser): \"\"\"Adds this sub-command's 
parser and", "parser.add_argument('--service-id', '-s', help='kill by service id instead of token', dest='is-service-id',", "service id instead of token', dest='is-service-id', action='store_true') parser.add_argument('--timeout', '-t', help='timeout", "'-t', help='timeout (in seconds) for kill to complete', type=check_positive, default=30)", "if success else 1 def register(add_parser): \"\"\"Adds this sub-command's parser", "'-f', help='kill all services, never prompt', dest='force', action='store_true') parser.add_argument('--service-id', '-s',", "services') parser.add_argument('token-or-service-id') parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force',", "help='timeout (in seconds) for kill to complete', type=check_positive, default=30) return", "token name.\"\"\" guard_no_cluster(clusters) token_name_or_service_id = args.get('token-or-service-id') is_service_id = args.get('is-service-id', False)", "services, never prompt', dest='force', action='store_true') parser.add_argument('--service-id', '-s', help='kill by service", "function\"\"\" parser = add_parser('kill', help='kill services') parser.add_argument('token-or-service-id') parser.add_argument('--force', '-f', help='kill", "the action function\"\"\" parser = add_parser('kill', help='kill services') parser.add_argument('token-or-service-id') parser.add_argument('--force',", "sub-command's parser and returns the action function\"\"\" parser = add_parser('kill',", "False) timeout_secs = args['timeout'] success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag,", "args.get('token-or-service-id') is_service_id = args.get('is-service-id', False) force_flag = args.get('force', False) timeout_secs", "timeout_secs) return 0 if success else 1 def register(add_parser): \"\"\"Adds", "else 1 def register(add_parser): \"\"\"Adds this sub-command's parser and returns", "args.get('force', False) timeout_secs = args['timeout'] success = 
process_kill_request(clusters, token_name_or_service_id, is_service_id,", "action function\"\"\" parser = add_parser('kill', help='kill services') parser.add_argument('token-or-service-id') parser.add_argument('--force', '-f',", "dest='force', action='store_true') parser.add_argument('--service-id', '-s', help='kill by service id instead of", "\"\"\"Adds this sub-command's parser and returns the action function\"\"\" parser", "def register(add_parser): \"\"\"Adds this sub-command's parser and returns the action", "import guard_no_cluster, check_positive def kill(clusters, args, _, __): \"\"\"Kills the", "0 if success else 1 def register(add_parser): \"\"\"Adds this sub-command's", "return 0 if success else 1 def register(add_parser): \"\"\"Adds this", "= add_parser('kill', help='kill services') parser.add_argument('token-or-service-id') parser.add_argument('--force', '-f', help='kill all services,", "\"\"\"Kills the service(s) using the given token name.\"\"\" guard_no_cluster(clusters) token_name_or_service_id", "help='kill all services, never prompt', dest='force', action='store_true') parser.add_argument('--service-id', '-s', help='kill", "1 def register(add_parser): \"\"\"Adds this sub-command's parser and returns the", "args.get('is-service-id', False) force_flag = args.get('force', False) timeout_secs = args['timeout'] success", "instead of token', dest='is-service-id', action='store_true') parser.add_argument('--timeout', '-t', help='timeout (in seconds)", "given token name.\"\"\" guard_no_cluster(clusters) token_name_or_service_id = args.get('token-or-service-id') is_service_id = args.get('is-service-id',", "guard_no_cluster, check_positive def kill(clusters, args, _, __): \"\"\"Kills the service(s)", "is_service_id, force_flag, timeout_secs) return 0 if success else 1 def", "parser.add_argument('token-or-service-id') parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force', action='store_true')", "the service(s) using the 
given token name.\"\"\" guard_no_cluster(clusters) token_name_or_service_id =", "help='kill services') parser.add_argument('token-or-service-id') parser.add_argument('--force', '-f', help='kill all services, never prompt',", "parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force', action='store_true') parser.add_argument('--service-id',", "token_name_or_service_id = args.get('token-or-service-id') is_service_id = args.get('is-service-id', False) force_flag = args.get('force'," ]
[ "sys.exit(1) def check_system(api): \"\"\"Check if all system endpoints are available", "check_auth_token(api) def show_version(): \"\"\"Show A2T version.\"\"\" print(\"A2T version {major}.{minor}\".format(major=VERSION_MAJOR, minor=VERSION_MINOR))", "= time() tags = cfg[\"tags\"] start_tests(cfg, tests, tags, component_analysis, stack_analysis)", "== \"__main__\": # execute only if run as a script", "tags, component_analysis, stack_analysis) t2 = time() log.info(\"Start time: {}\".format(t1)) log.info(\"End", "= setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL', None) component_analysis = ComponentAnalysis(coreapi_url, cfg[\"access_token\"],", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU", "time() tags = cfg[\"tags\"] start_tests(cfg, tests, tags, component_analysis, stack_analysis) t2", "if api.check_auth_token_validity(): log.success(\"ok\") else: log.error(\"Fatal: wrong token(?)\") sys.exit(1) def check_system(api):", "__name__ == \"__main__\": # execute only if run as a", "csv_reader import read_csv_as_dicts from setup import setup from cliargs import", "= StackAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) check_system(component_analysis) try: tests = read_csv_as_dicts(cfg[\"input_file\"])", "This program is free software: you can redistribute it and/or", "See the GNU General Public License for more details. 
You", "cfg[\"access_token\"], cfg[\"user_key\"], True) check_system(component_analysis) try: tests = read_csv_as_dicts(cfg[\"input_file\"]) except Exception", "log.info(\"Checking: authorization token for the core API\") with log.indent(): if", "under the terms of the GNU General Public License as", "check_system(component_analysis) try: tests = read_csv_as_dicts(cfg[\"input_file\"]) except Exception as e: log.error(\"Test", "import time from fastlog import log from csv_reader import read_csv_as_dicts", "True) stack_analysis = StackAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) check_system(component_analysis) try: tests", "token for the core API.\"\"\" log.info(\"Checking: authorization token for the", "can redistribute it and/or modify it under the terms of", "read\") log.error(e) sys.exit(0) t1 = time() tags = cfg[\"tags\"] start_tests(cfg,", "the Analytics API Load Tests.\"\"\" log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args() if", "core API endpoint\") with log.indent(): if not api.is_api_running(): log.error(\"Fatal: tested", "tool. Copyright (c) 2019 Red Hat Inc. This program is", "it will be useful, but WITHOUT ANY WARRANTY; without even", "import StackAnalysis from test_runner import start_tests # current version of", "the GNU General Public License for more details. 
You should", "as published by the Free Software Foundation, either version 3", "log.success(\"ok\") else: log.error(\"Fatal: wrong token(?)\") sys.exit(1) def check_system(api): \"\"\"Check if", "are available and that tokens are valid.\"\"\" # try to", "either version 3 of the License, or (at your option)", "from fastlog import log from csv_reader import read_csv_as_dicts from setup", "it and/or modify it under the terms of the GNU", "= time() log.info(\"Start time: {}\".format(t1)) log.info(\"End time: {}\".format(t2)) log.info(\"Duration: {}\".format(t2", "of the GNU General Public License as published by the", "the core API.\"\"\" log.info(\"Checking: authorization token for the core API\")", "Public License along with this program. If not, see <http://www.gnu.org/licenses/>.", "API\") with log.indent(): if api.check_auth_token_validity(): log.success(\"ok\") else: log.error(\"Fatal: wrong token(?)\")", "GNU General Public License along with this program. If not,", "Load Tests tool. Copyright (c) 2019 Red Hat Inc. This", "the Free Software Foundation, either version 3 of the License,", "cli_arguments.version: show_version() sys.exit(0) else: cfg = setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL',", "valid.\"\"\" # try to access system endpoints log.info(\"System check\") with", "API.\"\"\" log.info(\"Checking: authorization token for the core API\") with log.indent():", "True) check_system(component_analysis) try: tests = read_csv_as_dicts(cfg[\"input_file\"]) except Exception as e:", "= 1 VERSION_MINOR = 0 def check_api_endpoint(api): \"\"\"Check that some", "try to access system endpoints log.info(\"System check\") with log.indent(): check_api_endpoint(api)", "General Public License along with this program. If not, see", "General Public License for more details. 
You should have received", "free software: you can redistribute it and/or modify it under", "with log.indent(): if api.check_auth_token_validity(): log.success(\"ok\") else: log.error(\"Fatal: wrong token(?)\") sys.exit(1)", "import sys import os from time import time from fastlog", "time from fastlog import log from csv_reader import read_csv_as_dicts from", "and that tokens are valid.\"\"\" # try to access system", "Red Hat Inc. This program is free software: you can", "# try to access system endpoints log.info(\"System check\") with log.indent():", "from cliargs import cli_parser from component_analysis import ComponentAnalysis from stack_analysis", "component_analysis = ComponentAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) stack_analysis = StackAnalysis(coreapi_url, cfg[\"access_token\"],", "\"\"\"Check if all system endpoints are available and that tokens", "warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "- t1)) if __name__ == \"__main__\": # execute only if", "that it will be useful, but WITHOUT ANY WARRANTY; without", "3 of the License, or (at your option) any later", "of this tool VERSION_MAJOR = 1 VERSION_MINOR = 0 def", "will be useful, but WITHOUT ANY WARRANTY; without even the", "or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General", "token(?)\") sys.exit(1) def check_system(api): \"\"\"Check if all system endpoints are", "import start_tests # current version of this tool VERSION_MAJOR =", "API endpoint\") with log.indent(): if not api.is_api_running(): log.error(\"Fatal: tested system", "time import time from fastlog import log from csv_reader import", "\"__main__\": # execute only if run as a script main()", "Analytics API Load Tests tool. Copyright (c) 2019 Red Hat", "Public License for more details. You should have received a", "see <http://www.gnu.org/licenses/>. 
\"\"\" import sys import os from time import", "log.info(\"Duration: {}\".format(t2 - t1)) if __name__ == \"__main__\": # execute", "<gh_stars>0 \"\"\"The main module of the Analytics API Load Tests", "\"\"\"Check the authorization token for the core API.\"\"\" log.info(\"Checking: authorization", "the hope that it will be useful, but WITHOUT ANY", "of the GNU General Public License along with this program.", "cfg[\"access_token\"], cfg[\"user_key\"], True) stack_analysis = StackAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) check_system(component_analysis)", "log.error(\"Test description can not be read\") log.error(e) sys.exit(0) t1 =", "terms of the GNU General Public License as published by", "along with this program. If not, see <http://www.gnu.org/licenses/>. \"\"\" import", "of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the", "version 3 of the License, or (at your option) any", "from stack_analysis import StackAnalysis from test_runner import start_tests # current", "WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY", "t2 = time() log.info(\"Start time: {}\".format(t1)) log.info(\"End time: {}\".format(t2)) log.info(\"Duration:", "ComponentAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) stack_analysis = StackAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True)", "modify it under the terms of the GNU General Public", "License as published by the Free Software Foundation, either version", "that tokens are valid.\"\"\" # try to access system endpoints", "core API\") with log.indent(): if api.check_auth_token_validity(): log.success(\"ok\") else: log.error(\"Fatal: wrong", "are valid.\"\"\" # try to access system endpoints log.info(\"System check\")", "program. If not, see <http://www.gnu.org/licenses/>. 
\"\"\" import sys import os", "log from csv_reader import read_csv_as_dicts from setup import setup from", "ComponentAnalysis from stack_analysis import StackAnalysis from test_runner import start_tests #", "not available\") sys.exit(1) else: log.success(\"ok\") def check_auth_token(api): \"\"\"Check the authorization", "def check_system(api): \"\"\"Check if all system endpoints are available and", "sys.exit(0) t1 = time() tags = cfg[\"tags\"] start_tests(cfg, tests, tags,", "be read\") log.error(e) sys.exit(0) t1 = time() tags = cfg[\"tags\"]", "ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or", "Analytics API Load Tests.\"\"\" log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args() if cli_arguments.version:", "{}\".format(t1)) log.info(\"End time: {}\".format(t2)) log.info(\"Duration: {}\".format(t2 - t1)) if __name__", "import ComponentAnalysis from stack_analysis import StackAnalysis from test_runner import start_tests", "StackAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) check_system(component_analysis) try: tests = read_csv_as_dicts(cfg[\"input_file\"]) except", "import cli_parser from component_analysis import ComponentAnalysis from stack_analysis import StackAnalysis", "received a copy of the GNU General Public License along", "check_system(api): \"\"\"Check if all system endpoints are available and that", "option) any later version. This program is distributed in the", "GNU General Public License for more details. You should have", "cliargs import cli_parser from component_analysis import ComponentAnalysis from stack_analysis import", "<http://www.gnu.org/licenses/>. \"\"\" import sys import os from time import time", "description can not be read\") log.error(e) sys.exit(0) t1 = time()", "License for more details. 
You should have received a copy", "import log from csv_reader import read_csv_as_dicts from setup import setup", "tests, tags, component_analysis, stack_analysis) t2 = time() log.info(\"Start time: {}\".format(t1))", "not be read\") log.error(e) sys.exit(0) t1 = time() tags =", "api.is_api_running(): log.error(\"Fatal: tested system is not available\") sys.exit(1) else: log.success(\"ok\")", "VERSION_MAJOR = 1 VERSION_MINOR = 0 def check_api_endpoint(api): \"\"\"Check that", "log.info(\"Checking: core API endpoint\") with log.indent(): if not api.is_api_running(): log.error(\"Fatal:", "from test_runner import start_tests # current version of this tool", "is free software: you can redistribute it and/or modify it", "show_version() sys.exit(0) else: cfg = setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL', None)", "Copyright (c) 2019 Red Hat Inc. This program is free", "start_tests # current version of this tool VERSION_MAJOR = 1", "if __name__ == \"__main__\": # execute only if run as", "If not, see <http://www.gnu.org/licenses/>. \"\"\" import sys import os from", "available\") sys.exit(1) else: log.success(\"ok\") def check_auth_token(api): \"\"\"Check the authorization token", "component_analysis, stack_analysis) t2 = time() log.info(\"Start time: {}\".format(t1)) log.info(\"End time:", "a copy of the GNU General Public License along with", "redistribute it and/or modify it under the terms of the", "the GNU General Public License along with this program. If", "License, or (at your option) any later version. This program", "with log.indent(): check_api_endpoint(api) check_auth_token(api) def show_version(): \"\"\"Show A2T version.\"\"\" print(\"A2T", "all system endpoints are available and that tokens are valid.\"\"\"", "log.error(\"Fatal: tested system is not available\") sys.exit(1) else: log.success(\"ok\") def", "this program. If not, see <http://www.gnu.org/licenses/>. 
\"\"\" import sys import", "Free Software Foundation, either version 3 of the License, or", "PARTICULAR PURPOSE. See the GNU General Public License for more", "fastlog import log from csv_reader import read_csv_as_dicts from setup import", "This program is distributed in the hope that it will", "version of this tool VERSION_MAJOR = 1 VERSION_MINOR = 0", "that some API endpoint is callable.\"\"\" log.info(\"Checking: core API endpoint\")", "is not available\") sys.exit(1) else: log.success(\"ok\") def check_auth_token(api): \"\"\"Check the", "some API endpoint is callable.\"\"\" log.info(\"Checking: core API endpoint\") with", "the GNU General Public License as published by the Free", "t1)) if __name__ == \"__main__\": # execute only if run", "Tests.\"\"\" log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args() if cli_arguments.version: show_version() sys.exit(0) else:", "with log.indent(): if not api.is_api_running(): log.error(\"Fatal: tested system is not", "cli_parser.parse_args() if cli_arguments.version: show_version() sys.exit(0) else: cfg = setup(cli_arguments) coreapi_url", "GNU General Public License as published by the Free Software", "stack_analysis) t2 = time() log.info(\"Start time: {}\".format(t1)) log.info(\"End time: {}\".format(t2))", "from setup import setup from cliargs import cli_parser from component_analysis", "core API.\"\"\" log.info(\"Checking: authorization token for the core API\") with", "Load Tests.\"\"\" log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args() if cli_arguments.version: show_version() sys.exit(0)", "of the Analytics API Load Tests tool. Copyright (c) 2019", "hope that it will be useful, but WITHOUT ANY WARRANTY;", "it under the terms of the GNU General Public License", "module of the Analytics API Load Tests tool. Copyright (c)", "log.indent(): if not api.is_api_running(): log.error(\"Fatal: tested system is not available\")", "program is distributed in the hope that it will be", "more details. 
You should have received a copy of the", "Software Foundation, either version 3 of the License, or (at", "= read_csv_as_dicts(cfg[\"input_file\"]) except Exception as e: log.error(\"Test description can not", "log.indent(): if api.check_auth_token_validity(): log.success(\"ok\") else: log.error(\"Fatal: wrong token(?)\") sys.exit(1) def", "for the core API\") with log.indent(): if api.check_auth_token_validity(): log.success(\"ok\") else:", "endpoint\") with log.indent(): if not api.is_api_running(): log.error(\"Fatal: tested system is", "os from time import time from fastlog import log from", "import os from time import time from fastlog import log", "log.info(\"Start time: {}\".format(t1)) log.info(\"End time: {}\".format(t2)) log.info(\"Duration: {}\".format(t2 - t1))", "(at your option) any later version. This program is distributed", "the License, or (at your option) any later version. This", "= cfg[\"tags\"] start_tests(cfg, tests, tags, component_analysis, stack_analysis) t2 = time()", "Public License as published by the Free Software Foundation, either", "os.environ.get('F8A_SERVER_API_URL', None) component_analysis = ComponentAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) stack_analysis =", "VERSION_MINOR = 0 def check_api_endpoint(api): \"\"\"Check that some API endpoint", "PURPOSE. See the GNU General Public License for more details.", "is callable.\"\"\" log.info(\"Checking: core API endpoint\") with log.indent(): if not", "the Analytics API Load Tests tool. Copyright (c) 2019 Red", "cli_parser from component_analysis import ComponentAnalysis from stack_analysis import StackAnalysis from", "as e: log.error(\"Test description can not be read\") log.error(e) sys.exit(0)", "can not be read\") log.error(e) sys.exit(0) t1 = time() tags", "try: tests = read_csv_as_dicts(cfg[\"input_file\"]) except Exception as e: log.error(\"Test description", "any later version. 
This program is distributed in the hope", "t1 = time() tags = cfg[\"tags\"] start_tests(cfg, tests, tags, component_analysis,", "time() log.info(\"Start time: {}\".format(t1)) log.info(\"End time: {}\".format(t2)) log.info(\"Duration: {}\".format(t2 -", "token for the core API\") with log.indent(): if api.check_auth_token_validity(): log.success(\"ok\")", "= 0 def check_api_endpoint(api): \"\"\"Check that some API endpoint is", "= ComponentAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) stack_analysis = StackAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"],", "not api.is_api_running(): log.error(\"Fatal: tested system is not available\") sys.exit(1) else:", "{}\".format(t2)) log.info(\"Duration: {}\".format(t2 - t1)) if __name__ == \"__main__\": #", "except Exception as e: log.error(\"Test description can not be read\")", "authorization token for the core API.\"\"\" log.info(\"Checking: authorization token for", "not, see <http://www.gnu.org/licenses/>. \"\"\" import sys import os from time", "for the core API.\"\"\" log.info(\"Checking: authorization token for the core", "1 VERSION_MINOR = 0 def check_api_endpoint(api): \"\"\"Check that some API", "cfg[\"user_key\"], True) stack_analysis = StackAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) check_system(component_analysis) try:", "else: log.success(\"ok\") def check_auth_token(api): \"\"\"Check the authorization token for the", "# current version of this tool VERSION_MAJOR = 1 VERSION_MINOR", "callable.\"\"\" log.info(\"Checking: core API endpoint\") with log.indent(): if not api.is_api_running():", "\"\"\" import sys import os from time import time from", "or (at your option) any later version. This program is", "e: log.error(\"Test description can not be read\") log.error(e) sys.exit(0) t1", "main module of the Analytics API Load Tests tool. 
Copyright", "= os.environ.get('F8A_SERVER_API_URL', None) component_analysis = ComponentAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) stack_analysis", "WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS", "API Load Tests.\"\"\" log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args() if cli_arguments.version: show_version()", "tested system is not available\") sys.exit(1) else: log.success(\"ok\") def check_auth_token(api):", "be useful, but WITHOUT ANY WARRANTY; without even the implied", "coreapi_url = os.environ.get('F8A_SERVER_API_URL', None) component_analysis = ComponentAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True)", "0 def check_api_endpoint(api): \"\"\"Check that some API endpoint is callable.\"\"\"", "\"\"\"Show A2T version.\"\"\" print(\"A2T version {major}.{minor}\".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def main(): \"\"\"Entry", "def check_auth_token(api): \"\"\"Check the authorization token for the core API.\"\"\"", "def main(): \"\"\"Entry point to the Analytics API Load Tests.\"\"\"", "time: {}\".format(t2)) log.info(\"Duration: {}\".format(t2 - t1)) if __name__ == \"__main__\":", "available and that tokens are valid.\"\"\" # try to access", "stack_analysis = StackAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) check_system(component_analysis) try: tests =", "implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "system endpoints are available and that tokens are valid.\"\"\" #", "start_tests(cfg, tests, tags, component_analysis, stack_analysis) t2 = time() log.info(\"Start time:", "cfg[\"tags\"] start_tests(cfg, tests, tags, component_analysis, stack_analysis) t2 = time() log.info(\"Start", "check_api_endpoint(api) check_auth_token(api) def show_version(): \"\"\"Show A2T version.\"\"\" print(\"A2T version {major}.{minor}\".format(major=VERSION_MAJOR,", "of the License, or (at your option) any later version.", "published 
by the Free Software Foundation, either version 3 of", "setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL', None) component_analysis = ComponentAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"],", "the core API\") with log.indent(): if api.check_auth_token_validity(): log.success(\"ok\") else: log.error(\"Fatal:", "system endpoints log.info(\"System check\") with log.indent(): check_api_endpoint(api) check_auth_token(api) def show_version():", "General Public License as published by the Free Software Foundation,", "component_analysis import ComponentAnalysis from stack_analysis import StackAnalysis from test_runner import", "tests = read_csv_as_dicts(cfg[\"input_file\"]) except Exception as e: log.error(\"Test description can", "You should have received a copy of the GNU General", "sys.exit(1) else: log.success(\"ok\") def check_auth_token(api): \"\"\"Check the authorization token for", "log.error(e) sys.exit(0) t1 = time() tags = cfg[\"tags\"] start_tests(cfg, tests,", "API endpoint is callable.\"\"\" log.info(\"Checking: core API endpoint\") with log.indent():", "cfg[\"user_key\"], True) check_system(component_analysis) try: tests = read_csv_as_dicts(cfg[\"input_file\"]) except Exception as", "API Load Tests tool. 
Copyright (c) 2019 Red Hat Inc.", "check\") with log.indent(): check_api_endpoint(api) check_auth_token(api) def show_version(): \"\"\"Show A2T version.\"\"\"", "sys import os from time import time from fastlog import", "log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args() if cli_arguments.version: show_version() sys.exit(0) else: cfg", "access system endpoints log.info(\"System check\") with log.indent(): check_api_endpoint(api) check_auth_token(api) def", "point to the Analytics API Load Tests.\"\"\" log.setLevel(log.INFO) cli_arguments =", "from csv_reader import read_csv_as_dicts from setup import setup from cliargs", "stack_analysis import StackAnalysis from test_runner import start_tests # current version", "\"\"\"Check that some API endpoint is callable.\"\"\" log.info(\"Checking: core API", "this tool VERSION_MAJOR = 1 VERSION_MINOR = 0 def check_api_endpoint(api):", "{}\".format(t2 - t1)) if __name__ == \"__main__\": # execute only", "else: cfg = setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL', None) component_analysis =", "else: log.error(\"Fatal: wrong token(?)\") sys.exit(1) def check_system(api): \"\"\"Check if all", "log.error(\"Fatal: wrong token(?)\") sys.exit(1) def check_system(api): \"\"\"Check if all system", "later version. This program is distributed in the hope that", "\"\"\"The main module of the Analytics API Load Tests tool.", "read_csv_as_dicts(cfg[\"input_file\"]) except Exception as e: log.error(\"Test description can not be", "print(\"A2T version {major}.{minor}\".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def main(): \"\"\"Entry point to the", "even the implied warranty of MERCHANTABILITY or FITNESS FOR A", "FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public", "should have received a copy of the GNU General Public", "def check_api_endpoint(api): \"\"\"Check that some API endpoint is callable.\"\"\" log.info(\"Checking:", "Hat Inc. 
This program is free software: you can redistribute", "read_csv_as_dicts from setup import setup from cliargs import cli_parser from", "FOR A PARTICULAR PURPOSE. See the GNU General Public License", "you can redistribute it and/or modify it under the terms", "and/or modify it under the terms of the GNU General", "cfg = setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL', None) component_analysis = ComponentAnalysis(coreapi_url,", "api.check_auth_token_validity(): log.success(\"ok\") else: log.error(\"Fatal: wrong token(?)\") sys.exit(1) def check_system(api): \"\"\"Check", "tags = cfg[\"tags\"] start_tests(cfg, tests, tags, component_analysis, stack_analysis) t2 =", "\"\"\"Entry point to the Analytics API Load Tests.\"\"\" log.setLevel(log.INFO) cli_arguments", "the authorization token for the core API.\"\"\" log.info(\"Checking: authorization token", "in the hope that it will be useful, but WITHOUT", "if cli_arguments.version: show_version() sys.exit(0) else: cfg = setup(cli_arguments) coreapi_url =", "log.indent(): check_api_endpoint(api) check_auth_token(api) def show_version(): \"\"\"Show A2T version.\"\"\" print(\"A2T version", "if not api.is_api_running(): log.error(\"Fatal: tested system is not available\") sys.exit(1)", "import read_csv_as_dicts from setup import setup from cliargs import cli_parser", "cli_arguments = cli_parser.parse_args() if cli_arguments.version: show_version() sys.exit(0) else: cfg =", "for more details. 
You should have received a copy of", "useful, but WITHOUT ANY WARRANTY; without even the implied warranty", "test_runner import start_tests # current version of this tool VERSION_MAJOR", "endpoints log.info(\"System check\") with log.indent(): check_api_endpoint(api) check_auth_token(api) def show_version(): \"\"\"Show", "check_auth_token(api): \"\"\"Check the authorization token for the core API.\"\"\" log.info(\"Checking:", "distributed in the hope that it will be useful, but", "wrong token(?)\") sys.exit(1) def check_system(api): \"\"\"Check if all system endpoints", "but WITHOUT ANY WARRANTY; without even the implied warranty of", "program is free software: you can redistribute it and/or modify", "with this program. If not, see <http://www.gnu.org/licenses/>. \"\"\" import sys", "from component_analysis import ComponentAnalysis from stack_analysis import StackAnalysis from test_runner", "the terms of the GNU General Public License as published", "A2T version.\"\"\" print(\"A2T version {major}.{minor}\".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def main(): \"\"\"Entry point", "log.info(\"End time: {}\".format(t2)) log.info(\"Duration: {}\".format(t2 - t1)) if __name__ ==", "def show_version(): \"\"\"Show A2T version.\"\"\" print(\"A2T version {major}.{minor}\".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def", "sys.exit(0) else: cfg = setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL', None) component_analysis", "log.success(\"ok\") def check_auth_token(api): \"\"\"Check the authorization token for the core", "Foundation, either version 3 of the License, or (at your", "A PARTICULAR PURPOSE. See the GNU General Public License for", "copy of the GNU General Public License along with this", "Tests tool. Copyright (c) 2019 Red Hat Inc. 
This program", "software: you can redistribute it and/or modify it under the", "from time import time from fastlog import log from csv_reader", "setup import setup from cliargs import cli_parser from component_analysis import", "None) component_analysis = ComponentAnalysis(coreapi_url, cfg[\"access_token\"], cfg[\"user_key\"], True) stack_analysis = StackAnalysis(coreapi_url,", "minor=VERSION_MINOR)) def main(): \"\"\"Entry point to the Analytics API Load", "Inc. This program is free software: you can redistribute it", "by the Free Software Foundation, either version 3 of the", "show_version(): \"\"\"Show A2T version.\"\"\" print(\"A2T version {major}.{minor}\".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def main():", "to access system endpoints log.info(\"System check\") with log.indent(): check_api_endpoint(api) check_auth_token(api)", "the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR", "check_api_endpoint(api): \"\"\"Check that some API endpoint is callable.\"\"\" log.info(\"Checking: core", "version. This program is distributed in the hope that it", "system is not available\") sys.exit(1) else: log.success(\"ok\") def check_auth_token(api): \"\"\"Check", "endpoints are available and that tokens are valid.\"\"\" # try", "have received a copy of the GNU General Public License", "version {major}.{minor}\".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def main(): \"\"\"Entry point to the Analytics", "tool VERSION_MAJOR = 1 VERSION_MINOR = 0 def check_api_endpoint(api): \"\"\"Check", "{major}.{minor}\".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def main(): \"\"\"Entry point to the Analytics API", "setup from cliargs import cli_parser from component_analysis import ComponentAnalysis from", "your option) any later version. 
This program is distributed in", "StackAnalysis from test_runner import start_tests # current version of this", "if all system endpoints are available and that tokens are", "import setup from cliargs import cli_parser from component_analysis import ComponentAnalysis", "(c) 2019 Red Hat Inc. This program is free software:", "log.info(\"System check\") with log.indent(): check_api_endpoint(api) check_auth_token(api) def show_version(): \"\"\"Show A2T", "tokens are valid.\"\"\" # try to access system endpoints log.info(\"System", "= cli_parser.parse_args() if cli_arguments.version: show_version() sys.exit(0) else: cfg = setup(cli_arguments)", "version.\"\"\" print(\"A2T version {major}.{minor}\".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def main(): \"\"\"Entry point to", "authorization token for the core API\") with log.indent(): if api.check_auth_token_validity():", "Exception as e: log.error(\"Test description can not be read\") log.error(e)", "License along with this program. If not, see <http://www.gnu.org/licenses/>. \"\"\"", "details. You should have received a copy of the GNU", "main(): \"\"\"Entry point to the Analytics API Load Tests.\"\"\" log.setLevel(log.INFO)", "endpoint is callable.\"\"\" log.info(\"Checking: core API endpoint\") with log.indent(): if", "2019 Red Hat Inc. This program is free software: you", "to the Analytics API Load Tests.\"\"\" log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args()", "current version of this tool VERSION_MAJOR = 1 VERSION_MINOR =", "is distributed in the hope that it will be useful,", "without even the implied warranty of MERCHANTABILITY or FITNESS FOR", "time: {}\".format(t1)) log.info(\"End time: {}\".format(t2)) log.info(\"Duration: {}\".format(t2 - t1)) if" ]
[ "riscv_ctg.utils as utils import riscv_ctg.constants as const from riscv_isac.cgf_normalize import", "cgf_argument += '// --cgf {} \\\\\\n'.format(cf) randomize_argument = '' if", "+ ' GMT' cgf_argument = '' for cf in cgf_file:", "my_dict = gen.reformat_instr(instr_dict) gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst) def ctg(verbose, out,", "foo['std_op'] is not None and opcode==foo['std_op']: op_node = foo break", "time import shutil from riscv_ctg.log import logger import riscv_ctg.utils as", "import time import shutil from riscv_ctg.log import logger import riscv_ctg.utils", "supported in current XLEN:\".format(opcode)) return if 'flen' in op_node: if", "random mytime = time.asctime(time.gmtime(time.time()) ) + ' GMT' cgf_argument =", "mytime = time.asctime(time.gmtime(time.time()) ) + ' GMT' cgf_argument = ''", "in op_node['xlen']: logger.warning(\"Skipping {0} since its not supported in current", "= random mytime = time.asctime(time.gmtime(time.time()) ) + ' GMT' cgf_argument", "global out_dir global xlen flen = 0 if 'opcode' not", "+ str(opcode)) return if xlen not in op_node['xlen']: logger.warning(\"Skipping {0}", "None: logger.warning(\"Skipping :\" + str(opcode)) return if xlen not in", "# return fprefix = os.path.join(out_dir,str(label)) logger.info('Generating Test for :' +", "gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node)))) logger.info(\"Writing tests for :\"+str(label)) my_dict = gen.reformat_instr(instr_dict)", "if op_node is None: logger.warning(\"Skipping :\" + str(opcode)) return if", "* from riscv_ctg.__init__ import __version__ def create_test(usage_str, node,label,base_isa,max_inst): global op_template", "shutil.copytree(const.env,env_dir) xlen = int(xlen_arg) out_dir = out randomize = random", "expand_cgf from riscv_ctg.generator import Generator from math import * from", "xlen logger.level(verbose) logger.info('****** RISC-V Compliance Test Generator {0} *******'.format(__version__ 
))", "str(label) +\"-\" + opcode) formattype = op_node['formattype'] gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa)", "= gen.opcomb(node) val_comb = gen.valcomb(node) instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node))))", "op_template global ramdomize global out_dir global xlen flen = 0", "return fprefix = os.path.join(out_dir,str(label)) logger.info('Generating Test for :' + str(label)", "opcode: flen = 32 else: flen = op_node['flen'][0] #if flen", "= Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa) op_comb = gen.opcomb(node) val_comb = gen.valcomb(node) instr_dict =", "--cgf {} \\\\\\n'.format(cf) randomize_argument = '' if random is True:", "const.usage.safe_substitute(base_isa=base_isa, \\ cgf=cgf_argument, version = __version__, time=mytime, \\ randomize=randomize_argument,xlen=str(xlen_arg)) op_template", "Rights Reserved.') logger.info(\"Copying env folder to Output directory.\") env_dir =", "'flen' in op_node: if '.d' in opcode: flen = 64", "not in node: return if 'ignore' in node: logger.info(\"Ignoring :\"", "--randomize' usage_str = const.usage.safe_substitute(base_isa=base_isa, \\ cgf=cgf_argument, version = __version__, time=mytime,", "as mp import time import shutil from riscv_ctg.log import logger", "max_inst) def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate): global op_template", "if 'ignore' in node: logger.info(\"Ignoring :\" + str(label)) if node['ignore']:", "(c) 2020, InCore Semiconductors Pvt. 
Ltd.') logger.info('All Rights Reserved.') logger.info(\"Copying", "instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node)))) logger.info(\"Writing tests for :\"+str(label)) my_dict", "return if 'ignore' in node: logger.info(\"Ignoring :\" + str(label)) if", "riscv_ctg.__init__ import __version__ def create_test(usage_str, node,label,base_isa,max_inst): global op_template global ramdomize", "+ opcode) formattype = op_node['formattype'] gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa) op_comb =", "results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for label,node in cgf.items()]) pool.close()", "= 32 else: flen = op_node['flen'][0] #if flen not in", "riscv_ctg.generator import Generator from math import * from riscv_ctg.__init__ import", "os.path.join(out_dir,str(label)) logger.info('Generating Test for :' + str(label) +\"-\" + opcode)", "else: flen = op_node['flen'][0] #if flen not in op_node['flen']: #", "flen = 64 elif '.s' in opcode: flen = 32", "randomize = random mytime = time.asctime(time.gmtime(time.time()) ) + ' GMT'", "if 'opcode' not in node: return if 'ignore' in node:", "opcode: flen = 64 elif '.s' in opcode: flen =", "xlen flen = 0 if 'opcode' not in node: return", "cgf_file,num_procs,base_isa, max_inst,list_duplicate): global op_template global randomize global out_dir global xlen", "if op!='metadata' and foo['std_op'] is not None and opcode==foo['std_op']: op_node", "for cf in cgf_file: cgf_argument += '// --cgf {} \\\\\\n'.format(cf)", "import * from riscv_ctg.__init__ import __version__ def create_test(usage_str, node,label,base_isa,max_inst): global", "\\\\\\n'.format(cf) randomize_argument = '' if random is True: randomize_argument =", "utils import riscv_ctg.constants as const from riscv_isac.cgf_normalize import expand_cgf from", "formattype = op_node['formattype'] gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa) op_comb = gen.opcomb(node) 
val_comb", ") + ' GMT' cgf_argument = '' for cf in", "logger import riscv_ctg.utils as utils import riscv_ctg.constants as const from", "global xlen flen = 0 if 'opcode' not in node:", "randomize global out_dir global xlen logger.level(verbose) logger.info('****** RISC-V Compliance Test", "env folder to Output directory.\") env_dir = os.path.join(out,\"env\") if not", "random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate): global op_template global randomize global out_dir", "multiprocessing as mp import time import shutil from riscv_ctg.log import", "= gen.valcomb(node) instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node)))) logger.info(\"Writing tests for", "out_dir = out randomize = random mytime = time.asctime(time.gmtime(time.time()) )", "= utils.load_yaml(const.template_file) cgf = expand_cgf(cgf_file,xlen,list_duplicate) pool = mp.Pool(num_procs) results =", "val_comb = gen.valcomb(node) instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node)))) logger.info(\"Writing tests", "cgf = expand_cgf(cgf_file,xlen,list_duplicate) pool = mp.Pool(num_procs) results = pool.starmap(create_test, [(usage_str,", "str(label)) if node['ignore']: return for opcode in node['opcode']: op_node=None if", "in opcode: flen = 32 else: flen = op_node['flen'][0] #if", "Generator {0} *******'.format(__version__ )) logger.info('Copyright (c) 2020, InCore Semiconductors Pvt.", "file for details import os,re import multiprocessing as mp import", "= op_node['flen'][0] #if flen not in op_node['flen']: # return fprefix", "'.d' in opcode: flen = 64 elif '.s' in opcode:", "= foo break else: op_node = op_template[opcode] if op_node is", "in opcode: flen = 64 elif '.s' in opcode: flen", "import riscv_ctg.utils as utils import riscv_ctg.constants as const from riscv_isac.cgf_normalize", "gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa) op_comb = gen.opcomb(node) val_comb = 
gen.valcomb(node) instr_dict", "gen.opcomb(node) val_comb = gen.valcomb(node) instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node)))) logger.info(\"Writing", "'' for cf in cgf_file: cgf_argument += '// --cgf {}", "not in op_node['xlen']: logger.warning(\"Skipping {0} since its not supported in", "for :\"+str(label)) my_dict = gen.reformat_instr(instr_dict) gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst) def", "__version__ def create_test(usage_str, node,label,base_isa,max_inst): global op_template global ramdomize global out_dir", "for :' + str(label) +\"-\" + opcode) formattype = op_node['formattype']", "import os,re import multiprocessing as mp import time import shutil", "op_node, usage_str, max_inst) def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate):", "+= '// --cgf {} \\\\\\n'.format(cf) randomize_argument = '' if random", "opcode) formattype = op_node['formattype'] gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa) op_comb = gen.opcomb(node)", "since its not supported in current XLEN:\".format(opcode)) return if 'flen'", "import shutil from riscv_ctg.log import logger import riscv_ctg.utils as utils", "= gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node)))) logger.info(\"Writing tests for :\"+str(label)) my_dict =", "flen = op_node['flen'][0] #if flen not in op_node['flen']: # return", "logger.info('Copyright (c) 2020, InCore Semiconductors Pvt. Ltd.') logger.info('All Rights Reserved.')", "time.asctime(time.gmtime(time.time()) ) + ' GMT' cgf_argument = '' for cf", "and opcode==foo['std_op']: op_node = foo break else: op_node = op_template[opcode]", "if random is True: randomize_argument = ' \\\\\\n// --randomize' usage_str", "in op_node: if '.d' in opcode: flen = 64 elif", "*******'.format(__version__ )) logger.info('Copyright (c) 2020, InCore Semiconductors Pvt. 
Ltd.') logger.info('All", "node['opcode']: op_node=None if opcode not in op_template: for op,foo in", "for op,foo in op_template.items(): if op!='metadata' and foo['std_op'] is not", "\\ randomize=randomize_argument,xlen=str(xlen_arg)) op_template = utils.load_yaml(const.template_file) cgf = expand_cgf(cgf_file,xlen,list_duplicate) pool =", "logger.info('Generating Test for :' + str(label) +\"-\" + opcode) formattype", "See LICENSE.incore file for details import os,re import multiprocessing as", "op!='metadata' and foo['std_op'] is not None and opcode==foo['std_op']: op_node =", "Output directory.\") env_dir = os.path.join(out,\"env\") if not os.path.exists(env_dir): shutil.copytree(const.env,env_dir) xlen", "random is True: randomize_argument = ' \\\\\\n// --randomize' usage_str =", "+ str(label)) if node['ignore']: return for opcode in node['opcode']: op_node=None", "return if xlen not in op_node['xlen']: logger.warning(\"Skipping {0} since its", "in current XLEN:\".format(opcode)) return if 'flen' in op_node: if '.d'", "folder to Output directory.\") env_dir = os.path.join(out,\"env\") if not os.path.exists(env_dir):", "'' if random is True: randomize_argument = ' \\\\\\n// --randomize'", "if opcode not in op_template: for op,foo in op_template.items(): if", "usage_str = const.usage.safe_substitute(base_isa=base_isa, \\ cgf=cgf_argument, version = __version__, time=mytime, \\", "def create_test(usage_str, node,label,base_isa,max_inst): global op_template global ramdomize global out_dir global", "randomize_argument = ' \\\\\\n// --randomize' usage_str = const.usage.safe_substitute(base_isa=base_isa, \\ cgf=cgf_argument,", "'opcode' not in node: return if 'ignore' in node: logger.info(\"Ignoring", "randomize_argument = '' if random is True: randomize_argument = '", "and foo['std_op'] is not None and opcode==foo['std_op']: op_node = foo", "return for opcode in node['opcode']: op_node=None if opcode not in", "True: randomize_argument = ' \\\\\\n// --randomize' 
usage_str = const.usage.safe_substitute(base_isa=base_isa, \\", "op_node = op_template[opcode] if op_node is None: logger.warning(\"Skipping :\" +", "os.path.join(out,\"env\") if not os.path.exists(env_dir): shutil.copytree(const.env,env_dir) xlen = int(xlen_arg) out_dir =", "global ramdomize global out_dir global xlen flen = 0 if", "flen = 0 if 'opcode' not in node: return if", "foo break else: op_node = op_template[opcode] if op_node is None:", "Pvt. Ltd.') logger.info('All Rights Reserved.') logger.info(\"Copying env folder to Output", "cf in cgf_file: cgf_argument += '// --cgf {} \\\\\\n'.format(cf) randomize_argument", "os,re import multiprocessing as mp import time import shutil from", "in op_template.items(): if op!='metadata' and foo['std_op'] is not None and", "op_template: for op,foo in op_template.items(): if op!='metadata' and foo['std_op'] is", "not os.path.exists(env_dir): shutil.copytree(const.env,env_dir) xlen = int(xlen_arg) out_dir = out randomize", "expand_cgf(cgf_file,xlen,list_duplicate) pool = mp.Pool(num_procs) results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for", "op_template.items(): if op!='metadata' and foo['std_op'] is not None and opcode==foo['std_op']:", "Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa) op_comb = gen.opcomb(node) val_comb = gen.valcomb(node) instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb,", "xlen = int(xlen_arg) out_dir = out randomize = random mytime", "not in op_node['flen']: # return fprefix = os.path.join(out_dir,str(label)) logger.info('Generating Test", "val_comb, node)))) logger.info(\"Writing tests for :\"+str(label)) my_dict = gen.reformat_instr(instr_dict) gen.write_test(fprefix,node,label,my_dict,", "\\ cgf=cgf_argument, version = __version__, time=mytime, \\ randomize=randomize_argument,xlen=str(xlen_arg)) op_template =", "' GMT' cgf_argument = '' for cf in cgf_file: cgf_argument", "else: op_node = op_template[opcode] if op_node is 
None: logger.warning(\"Skipping :\"", "math import * from riscv_ctg.__init__ import __version__ def create_test(usage_str, node,label,base_isa,max_inst):", "logger.info(\"Ignoring :\" + str(label)) if node['ignore']: return for opcode in", "from riscv_isac.cgf_normalize import expand_cgf from riscv_ctg.generator import Generator from math", "op_template global randomize global out_dir global xlen logger.level(verbose) logger.info('****** RISC-V", "directory.\") env_dir = os.path.join(out,\"env\") if not os.path.exists(env_dir): shutil.copytree(const.env,env_dir) xlen =", "import logger import riscv_ctg.utils as utils import riscv_ctg.constants as const", "Semiconductors Pvt. Ltd.') logger.info('All Rights Reserved.') logger.info(\"Copying env folder to", "in op_node['flen']: # return fprefix = os.path.join(out_dir,str(label)) logger.info('Generating Test for", "node: logger.info(\"Ignoring :\" + str(label)) if node['ignore']: return for opcode", "'// --cgf {} \\\\\\n'.format(cf) randomize_argument = '' if random is", ":\"+str(label)) my_dict = gen.reformat_instr(instr_dict) gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst) def ctg(verbose,", "its not supported in current XLEN:\".format(opcode)) return if 'flen' in", "os.path.exists(env_dir): shutil.copytree(const.env,env_dir) xlen = int(xlen_arg) out_dir = out randomize =", "= time.asctime(time.gmtime(time.time()) ) + ' GMT' cgf_argument = '' for", "{0} *******'.format(__version__ )) logger.info('Copyright (c) 2020, InCore Semiconductors Pvt. 
Ltd.')", "logger.warning(\"Skipping :\" + str(opcode)) return if xlen not in op_node['xlen']:", "node['ignore']: return for opcode in node['opcode']: op_node=None if opcode not", "flen not in op_node['flen']: # return fprefix = os.path.join(out_dir,str(label)) logger.info('Generating", "= int(xlen_arg) out_dir = out randomize = random mytime =", "gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst) def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa,", "op_node=None if opcode not in op_template: for op,foo in op_template.items():", "xlen not in op_node['xlen']: logger.warning(\"Skipping {0} since its not supported", "global xlen logger.level(verbose) logger.info('****** RISC-V Compliance Test Generator {0} *******'.format(__version__", "if '.d' in opcode: flen = 64 elif '.s' in", "riscv_ctg.log import logger import riscv_ctg.utils as utils import riscv_ctg.constants as", "in node: logger.info(\"Ignoring :\" + str(label)) if node['ignore']: return for", "= os.path.join(out,\"env\") if not os.path.exists(env_dir): shutil.copytree(const.env,env_dir) xlen = int(xlen_arg) out_dir", "global out_dir global xlen logger.level(verbose) logger.info('****** RISC-V Compliance Test Generator", "cgf_file: cgf_argument += '// --cgf {} \\\\\\n'.format(cf) randomize_argument = ''", "riscv_isac.cgf_normalize import expand_cgf from riscv_ctg.generator import Generator from math import", "env_dir = os.path.join(out,\"env\") if not os.path.exists(env_dir): shutil.copytree(const.env,env_dir) xlen = int(xlen_arg)", "as utils import riscv_ctg.constants as const from riscv_isac.cgf_normalize import expand_cgf", "import __version__ def create_test(usage_str, node,label,base_isa,max_inst): global op_template global ramdomize global", "utils.load_yaml(const.template_file) cgf = expand_cgf(cgf_file,xlen,list_duplicate) pool = mp.Pool(num_procs) results = pool.starmap(create_test,", "Ltd.') logger.info('All Rights Reserved.') logger.info(\"Copying env folder 
to Output directory.\")", "usage_str, max_inst) def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate): global", "shutil from riscv_ctg.log import logger import riscv_ctg.utils as utils import", "opcode in node['opcode']: op_node=None if opcode not in op_template: for", "ramdomize global out_dir global xlen flen = 0 if 'opcode'", "op,foo in op_template.items(): if op!='metadata' and foo['std_op'] is not None", "2020, InCore Semiconductors Pvt. Ltd.') logger.info('All Rights Reserved.') logger.info(\"Copying env", "GMT' cgf_argument = '' for cf in cgf_file: cgf_argument +=", "is not None and opcode==foo['std_op']: op_node = foo break else:", "node)))) logger.info(\"Writing tests for :\"+str(label)) my_dict = gen.reformat_instr(instr_dict) gen.write_test(fprefix,node,label,my_dict, op_node,", ":\" + str(label)) if node['ignore']: return for opcode in node['opcode']:", "global randomize global out_dir global xlen logger.level(verbose) logger.info('****** RISC-V Compliance", "max_inst,list_duplicate): global op_template global randomize global out_dir global xlen logger.level(verbose)", "is True: randomize_argument = ' \\\\\\n// --randomize' usage_str = const.usage.safe_substitute(base_isa=base_isa,", "logger.info(\"Copying env folder to Output directory.\") env_dir = os.path.join(out,\"env\") if", "' \\\\\\n// --randomize' usage_str = const.usage.safe_substitute(base_isa=base_isa, \\ cgf=cgf_argument, version =", "InCore Semiconductors Pvt. Ltd.') logger.info('All Rights Reserved.') logger.info(\"Copying env folder", "LICENSE.incore file for details import os,re import multiprocessing as mp", ")) logger.info('Copyright (c) 2020, InCore Semiconductors Pvt. 
Ltd.') logger.info('All Rights", "import multiprocessing as mp import time import shutil from riscv_ctg.log", "as const from riscv_isac.cgf_normalize import expand_cgf from riscv_ctg.generator import Generator", "+\"-\" + opcode) formattype = op_node['formattype'] gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa) op_comb", "break else: op_node = op_template[opcode] if op_node is None: logger.warning(\"Skipping", "Reserved.') logger.info(\"Copying env folder to Output directory.\") env_dir = os.path.join(out,\"env\")", "randomize=randomize_argument,xlen=str(xlen_arg)) op_template = utils.load_yaml(const.template_file) cgf = expand_cgf(cgf_file,xlen,list_duplicate) pool = mp.Pool(num_procs)", "op_node is None: logger.warning(\"Skipping :\" + str(opcode)) return if xlen", ",xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate): global op_template global randomize global out_dir global", "Test for :' + str(label) +\"-\" + opcode) formattype =", "= op_template[opcode] if op_node is None: logger.warning(\"Skipping :\" + str(opcode))", "Compliance Test Generator {0} *******'.format(__version__ )) logger.info('Copyright (c) 2020, InCore", "str(opcode)) return if xlen not in op_node['xlen']: logger.warning(\"Skipping {0} since", "{0} since its not supported in current XLEN:\".format(opcode)) return if", "op_node['xlen']: logger.warning(\"Skipping {0} since its not supported in current XLEN:\".format(opcode))", "\\\\\\n// --randomize' usage_str = const.usage.safe_substitute(base_isa=base_isa, \\ cgf=cgf_argument, version = __version__,", "cgf_argument = '' for cf in cgf_file: cgf_argument += '//", "logger.info('****** RISC-V Compliance Test Generator {0} *******'.format(__version__ )) logger.info('Copyright (c)", "in op_template: for op,foo in op_template.items(): if op!='metadata' and foo['std_op']", "op_node: if '.d' in opcode: flen = 64 elif '.s'", "op_comb = gen.opcomb(node) val_comb = gen.valcomb(node) instr_dict = 
gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb,", "out_dir global xlen flen = 0 if 'opcode' not in", "= __version__, time=mytime, \\ randomize=randomize_argument,xlen=str(xlen_arg)) op_template = utils.load_yaml(const.template_file) cgf =", "if xlen not in op_node['xlen']: logger.warning(\"Skipping {0} since its not", "details import os,re import multiprocessing as mp import time import", "op_template = utils.load_yaml(const.template_file) cgf = expand_cgf(cgf_file,xlen,list_duplicate) pool = mp.Pool(num_procs) results", "= mp.Pool(num_procs) results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for label,node in", "in node['opcode']: op_node=None if opcode not in op_template: for op,foo", "riscv_ctg.constants as const from riscv_isac.cgf_normalize import expand_cgf from riscv_ctg.generator import", "op_node['flen'][0] #if flen not in op_node['flen']: # return fprefix =", "return if 'flen' in op_node: if '.d' in opcode: flen", "= '' if random is True: randomize_argument = ' \\\\\\n//", "= op_node['formattype'] gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa) op_comb = gen.opcomb(node) val_comb =", "node,label,base_isa,max_inst): global op_template global ramdomize global out_dir global xlen flen", "gen.reformat_instr(instr_dict) gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst) def ctg(verbose, out, random ,xlen_arg,", "out_dir global xlen logger.level(verbose) logger.info('****** RISC-V Compliance Test Generator {0}", "#if flen not in op_node['flen']: # return fprefix = os.path.join(out_dir,str(label))", "= gen.reformat_instr(instr_dict) gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst) def ctg(verbose, out, random", ":\" + str(opcode)) return if xlen not in op_node['xlen']: logger.warning(\"Skipping", "= expand_cgf(cgf_file,xlen,list_duplicate) pool = mp.Pool(num_procs) results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst)", 
"import riscv_ctg.constants as const from riscv_isac.cgf_normalize import expand_cgf from riscv_ctg.generator", "= os.path.join(out_dir,str(label)) logger.info('Generating Test for :' + str(label) +\"-\" +", "is None: logger.warning(\"Skipping :\" + str(opcode)) return if xlen not", "from riscv_ctg.generator import Generator from math import * from riscv_ctg.__init__", "import Generator from math import * from riscv_ctg.__init__ import __version__", "not None and opcode==foo['std_op']: op_node = foo break else: op_node", "op_node['flen']: # return fprefix = os.path.join(out_dir,str(label)) logger.info('Generating Test for :'", "to Output directory.\") env_dir = os.path.join(out,\"env\") if not os.path.exists(env_dir): shutil.copytree(const.env,env_dir)", "if node['ignore']: return for opcode in node['opcode']: op_node=None if opcode", "in node: return if 'ignore' in node: logger.info(\"Ignoring :\" +", "opcode==foo['std_op']: op_node = foo break else: op_node = op_template[opcode] if", "cgf=cgf_argument, version = __version__, time=mytime, \\ randomize=randomize_argument,xlen=str(xlen_arg)) op_template = utils.load_yaml(const.template_file)", "tests for :\"+str(label)) my_dict = gen.reformat_instr(instr_dict) gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst)", "+ str(label) +\"-\" + opcode) formattype = op_node['formattype'] gen =", "node: return if 'ignore' in node: logger.info(\"Ignoring :\" + str(label))", "RISC-V Compliance Test Generator {0} *******'.format(__version__ )) logger.info('Copyright (c) 2020,", "64 elif '.s' in opcode: flen = 32 else: flen", "32 else: flen = op_node['flen'][0] #if flen not in op_node['flen']:", "time=mytime, \\ randomize=randomize_argument,xlen=str(xlen_arg)) op_template = utils.load_yaml(const.template_file) cgf = expand_cgf(cgf_file,xlen,list_duplicate) pool", "'.s' in opcode: flen = 32 else: flen = op_node['flen'][0]", "version = __version__, time=mytime, \\ randomize=randomize_argument,xlen=str(xlen_arg)) 
op_template = utils.load_yaml(const.template_file) cgf", "gen.valcomb(node) instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node)))) logger.info(\"Writing tests for :\"+str(label))", "= out randomize = random mytime = time.asctime(time.gmtime(time.time()) ) +", "op_node['formattype'] gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa) op_comb = gen.opcomb(node) val_comb = gen.valcomb(node)", "logger.info('All Rights Reserved.') logger.info(\"Copying env folder to Output directory.\") env_dir", "= '' for cf in cgf_file: cgf_argument += '// --cgf", ":' + str(label) +\"-\" + opcode) formattype = op_node['formattype'] gen", "from math import * from riscv_ctg.__init__ import __version__ def create_test(usage_str,", "= ' \\\\\\n// --randomize' usage_str = const.usage.safe_substitute(base_isa=base_isa, \\ cgf=cgf_argument, version", "global op_template global ramdomize global out_dir global xlen flen =", "if not os.path.exists(env_dir): shutil.copytree(const.env,env_dir) xlen = int(xlen_arg) out_dir = out", "= 64 elif '.s' in opcode: flen = 32 else:", "{} \\\\\\n'.format(cf) randomize_argument = '' if random is True: randomize_argument", "not supported in current XLEN:\".format(opcode)) return if 'flen' in op_node:", "None and opcode==foo['std_op']: op_node = foo break else: op_node =", "import expand_cgf from riscv_ctg.generator import Generator from math import *", "global op_template global randomize global out_dir global xlen logger.level(verbose) logger.info('******", "out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate): global op_template global randomize global", "__version__, time=mytime, \\ randomize=randomize_argument,xlen=str(xlen_arg)) op_template = utils.load_yaml(const.template_file) cgf = expand_cgf(cgf_file,xlen,list_duplicate)", "from riscv_ctg.log import logger import riscv_ctg.utils as utils import riscv_ctg.constants", "= const.usage.safe_substitute(base_isa=base_isa, \\ 
cgf=cgf_argument, version = __version__, time=mytime, \\ randomize=randomize_argument,xlen=str(xlen_arg))", "out randomize = random mytime = time.asctime(time.gmtime(time.time()) ) + '", "flen = 32 else: flen = op_node['flen'][0] #if flen not", "ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate): global op_template global randomize", "# See LICENSE.incore file for details import os,re import multiprocessing", "create_test(usage_str, node,label,base_isa,max_inst): global op_template global ramdomize global out_dir global xlen", "from riscv_ctg.__init__ import __version__ def create_test(usage_str, node,label,base_isa,max_inst): global op_template global", "in cgf_file: cgf_argument += '// --cgf {} \\\\\\n'.format(cf) randomize_argument =", "for opcode in node['opcode']: op_node=None if opcode not in op_template:", "not in op_template: for op,foo in op_template.items(): if op!='metadata' and", "= 0 if 'opcode' not in node: return if 'ignore'", "for details import os,re import multiprocessing as mp import time", "Test Generator {0} *******'.format(__version__ )) logger.info('Copyright (c) 2020, InCore Semiconductors", "logger.info(\"Writing tests for :\"+str(label)) my_dict = gen.reformat_instr(instr_dict) gen.write_test(fprefix,node,label,my_dict, op_node, usage_str,", "fprefix = os.path.join(out_dir,str(label)) logger.info('Generating Test for :' + str(label) +\"-\"", "0 if 'opcode' not in node: return if 'ignore' in", "def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate): global op_template global", "op_node = foo break else: op_node = op_template[opcode] if op_node", "Generator from math import * from riscv_ctg.__init__ import __version__ def", "'ignore' in node: logger.info(\"Ignoring :\" + str(label)) if node['ignore']: return", "pool = mp.Pool(num_procs) results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for label,node", "elif '.s' in opcode: flen = 32 
else: flen =", "current XLEN:\".format(opcode)) return if 'flen' in op_node: if '.d' in", "op_template[opcode] if op_node is None: logger.warning(\"Skipping :\" + str(opcode)) return", "mp import time import shutil from riscv_ctg.log import logger import", "const from riscv_isac.cgf_normalize import expand_cgf from riscv_ctg.generator import Generator from", "mp.Pool(num_procs) results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for label,node in cgf.items()])", "logger.warning(\"Skipping {0} since its not supported in current XLEN:\".format(opcode)) return", "logger.level(verbose) logger.info('****** RISC-V Compliance Test Generator {0} *******'.format(__version__ )) logger.info('Copyright", "opcode not in op_template: for op,foo in op_template.items(): if op!='metadata'", "if 'flen' in op_node: if '.d' in opcode: flen =", "int(xlen_arg) out_dir = out randomize = random mytime = time.asctime(time.gmtime(time.time())", "XLEN:\".format(opcode)) return if 'flen' in op_node: if '.d' in opcode:" ]
[ "i in range(clock): if end_period[-1].lower() == 'am': end_period.append('PM') else: end_period.append('AM')", "new_time = f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}' if timed.new_weekday:", "end_period.append('AM') return MainTimer(new_time_joined, end_period, new_weekday, days) # Triggers process time", "if end_period[-1].lower() == 'am': end_period.append('PM') else: end_period.append('AM') return MainTimer(new_time_joined, end_period,", "[ 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ] start_time,", "new_time += f'- {timed.new_weekday} -' if timed.days == 1 and", "Total end_hours, end_mins = (current_hour + end_hour, current_minute + end_minute)", "days') def add_time(start, duration, start_weekday=None): weekdays = [ 'Monday', 'Tuesday',", "-' if timed.days == 1 and (period != timed.end_period or", "add_time(start, duration, start_weekday=None): weekdays = [ 'Monday', 'Tuesday', 'Wednesday', 'Thursday',", "range(clock): if end_period[-1].lower() == 'am': end_period.append('PM') else: end_period.append('AM') return MainTimer(new_time_joined,", "Figure out whether is AM or PM for i in", "+ end_mins // 60), ':', str(end_mins % 60).rjust(2, '0')] new_time_joined", "'Friday', 'Saturday', 'Sunday' ] start_time, period = start.split(' ') def", "'Saturday', 'Sunday' ] start_time, period = start.split(' ') def process_time():", "collections import namedtuple MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days')", "duration.split(':')]) # Adds Current time plus End Time Total end_hours,", "current_minute + end_minute) # Calculates Total days passed days =", "% 7] else: new_weekday = False # Figure out whether", "Triggers process time function timed = process_time() def process_output(): new_time", "timed.new_weekday: new_time += f'- {timed.new_weekday} -' if timed.days == 1", "timed.end_period == 'AM'): new_time += ' (new_day)' elif timed.days >", "' (new_day)' elif timed.days > 1: 
new_time += f' -Total", "'new_time_joined, end_period, new_weekday, days') def add_time(start, duration, start_weekday=None): weekdays =", "new_time += ' (new_day)' elif timed.days > 1: new_time +=", "end_hour, current_minute + end_minute) # Calculates Total days passed days", "+= f'- {timed.new_weekday} -' if timed.days == 1 and (period", "elif timed.days > 1: new_time += f' -Total days: {timed.days}-", "60).rjust(2, '0')] new_time_joined = ''.join(new_time_array) end_period = [period] # Clock,", "days elapsed clock = end_hours // 12 if start_weekday: start_day_idx", "AM or PM for i in range(clock): if end_period[-1].lower() ==", "passed days = int(end_hours/24) # Calculates New Time new_time_array =", "+ days % 7) % 7] else: new_weekday = False", "'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ] start_time, period", "for d in duration.split(':')]) # Adds Current time plus End", "new_time_joined = ''.join(new_time_array) end_period = [period] # Clock, calculates the", "else: end_period.append('AM') return MainTimer(new_time_joined, end_period, new_weekday, days) # Triggers process", "str(end_mins % 60).rjust(2, '0')] new_time_joined = ''.join(new_time_array) end_period = [period]", "function timed = process_time() def process_output(): new_time = f'New Time", "Calculates New Time new_time_array = [str(end_hours % 12 + end_mins", "= ''.join(new_time_array) end_period = [period] # Clock, calculates the days", "new_time += f' -Total days: {timed.days}- <<' return new_time new_time", "== 'AM'): new_time += ' (new_day)' elif timed.days > 1:", "= False # Figure out whether is AM or PM", "60), ':', str(end_mins % 60).rjust(2, '0')] new_time_joined = ''.join(new_time_array) end_period", "timed.end_period or timed.end_period == 'AM'): new_time += ' (new_day)' elif", "(period != timed.end_period or timed.end_period == 'AM'): new_time += '", "process_output() return new_time print('---'*30) x = add_time('10:00 AM', '54:00', 'Monday')", 
"calculates the days elapsed clock = end_hours // 12 if", "# Figure out whether is AM or PM for i", "> 1: new_time += f' -Total days: {timed.days}- <<' return", "in start_time.split(':')]) end_hour, end_minute = ([int(d) for d in duration.split(':')])", "f'- {timed.new_weekday} -' if timed.days == 1 and (period !=", "f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}' if timed.new_weekday: new_time +=", "= f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}' if timed.new_weekday: new_time", "return new_time new_time = process_output() return new_time print('---'*30) x =", "Adds Current time plus End Time Total end_hours, end_mins =", "else: new_weekday = False # Figure out whether is AM", "t in start_time.split(':')]) end_hour, end_minute = ([int(d) for d in", "12 + end_mins // 60), ':', str(end_mins % 60).rjust(2, '0')]", "{timed.days}- <<' return new_time new_time = process_output() return new_time print('---'*30)", "'AM'): new_time += ' (new_day)' elif timed.days > 1: new_time", "MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days') def add_time(start, duration,", "<<' return new_time new_time = process_output() return new_time print('---'*30) x", "end_hours, end_mins = (current_hour + end_hour, current_minute + end_minute) #", "':', str(end_mins % 60).rjust(2, '0')] new_time_joined = ''.join(new_time_array) end_period =", "start.split(' ') def process_time(): current_hour, current_minute = ([int(t) for t", "end_period.append('PM') else: end_period.append('AM') return MainTimer(new_time_joined, end_period, new_weekday, days) # Triggers", "'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ] start_time, period = start.split('", "7) % 7] else: new_weekday = False # Figure out", "== 1 and (period != timed.end_period or timed.end_period == 'AM'):", "% 12 + end_mins // 60), ':', str(end_mins % 60).rjust(2,", "(current_hour + end_hour, current_minute + end_minute) # Calculates Total days", "# Calculates 
Total days passed days = int(end_hours/24) # Calculates", "end_mins // 60), ':', str(end_mins % 60).rjust(2, '0')] new_time_joined =", "'Thursday', 'Friday', 'Saturday', 'Sunday' ] start_time, period = start.split(' ')", "= weekdays[(start_day_idx + days % 7) % 7] else: new_weekday", "+ end_hour, current_minute + end_minute) # Calculates Total days passed", "end_hour, end_minute = ([int(d) for d in duration.split(':')]) # Adds", "d in duration.split(':')]) # Adds Current time plus End Time", "([int(d) for d in duration.split(':')]) # Adds Current time plus", ">>> {timed.new_time_joined} {timed.end_period[-1]}' if timed.new_weekday: new_time += f'- {timed.new_weekday} -'", "Time is >>> {timed.new_time_joined} {timed.end_period[-1]}' if timed.new_weekday: new_time += f'-", "[str(end_hours % 12 + end_mins // 60), ':', str(end_mins %", "= end_hours // 12 if start_weekday: start_day_idx = weekdays.index(start_weekday.title()) new_weekday", "''.join(new_time_array) end_period = [period] # Clock, calculates the days elapsed", "([int(t) for t in start_time.split(':')]) end_hour, end_minute = ([int(d) for", "end_period, new_weekday, days') def add_time(start, duration, start_weekday=None): weekdays = [", "namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days') def add_time(start, duration, start_weekday=None): weekdays", "period = start.split(' ') def process_time(): current_hour, current_minute = ([int(t)", "end_mins = (current_hour + end_hour, current_minute + end_minute) # Calculates", "days) # Triggers process time function timed = process_time() def", "Calculates Total days passed days = int(end_hours/24) # Calculates New", "end_period, new_weekday, days) # Triggers process time function timed =", "# Clock, calculates the days elapsed clock = end_hours //", "weekdays[(start_day_idx + days % 7) % 7] else: new_weekday =", "end_minute = ([int(d) for d in duration.split(':')]) # Adds Current", "elapsed clock = end_hours // 12 if start_weekday: 
start_day_idx =", "if start_weekday: start_day_idx = weekdays.index(start_weekday.title()) new_weekday = weekdays[(start_day_idx + days", "7] else: new_weekday = False # Figure out whether is", "days: {timed.days}- <<' return new_time new_time = process_output() return new_time", "start_weekday: start_day_idx = weekdays.index(start_weekday.title()) new_weekday = weekdays[(start_day_idx + days %", "new_time = process_output() return new_time print('---'*30) x = add_time('10:00 AM',", "!= timed.end_period or timed.end_period == 'AM'): new_time += ' (new_day)'", "new_weekday, days) # Triggers process time function timed = process_time()", "in duration.split(':')]) # Adds Current time plus End Time Total", "= process_output() return new_time print('---'*30) x = add_time('10:00 AM', '54:00',", "'Sunday' ] start_time, period = start.split(' ') def process_time(): current_hour,", "% 60).rjust(2, '0')] new_time_joined = ''.join(new_time_array) end_period = [period] #", "int(end_hours/24) # Calculates New Time new_time_array = [str(end_hours % 12", "False # Figure out whether is AM or PM for", "'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ] start_time, period =", "or PM for i in range(clock): if end_period[-1].lower() == 'am':", "start_time.split(':')]) end_hour, end_minute = ([int(d) for d in duration.split(':')]) #", "= [period] # Clock, calculates the days elapsed clock =", "New Time new_time_array = [str(end_hours % 12 + end_mins //", "def process_time(): current_hour, current_minute = ([int(t) for t in start_time.split(':')])", "plus End Time Total end_hours, end_mins = (current_hour + end_hour,", "-Total days: {timed.days}- <<' return new_time new_time = process_output() return", "out whether is AM or PM for i in range(clock):", "start_weekday=None): weekdays = [ 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',", "# Triggers process time function timed = process_time() def process_output():", "= [ 'Monday', 'Tuesday', 
'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ]", "current_hour, current_minute = ([int(t) for t in start_time.split(':')]) end_hour, end_minute", "days % 7) % 7] else: new_weekday = False #", "new_weekday = False # Figure out whether is AM or", "= ([int(d) for d in duration.split(':')]) # Adds Current time", "new_weekday = weekdays[(start_day_idx + days % 7) % 7] else:", "in range(clock): if end_period[-1].lower() == 'am': end_period.append('PM') else: end_period.append('AM') return", "weekdays = [ 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'", "days = int(end_hours/24) # Calculates New Time new_time_array = [str(end_hours", "+= ' (new_day)' elif timed.days > 1: new_time += f'", "1 and (period != timed.end_period or timed.end_period == 'AM'): new_time", "end_period = [period] # Clock, calculates the days elapsed clock", "new_time new_time = process_output() return new_time print('---'*30) x = add_time('10:00", "= start.split(' ') def process_time(): current_hour, current_minute = ([int(t) for", "from collections import namedtuple MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday,", "for t in start_time.split(':')]) end_hour, end_minute = ([int(d) for d", "<filename>Back-End/Python/timers/clock_named_tuple.py from collections import namedtuple MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period,", "MainTimer(new_time_joined, end_period, new_weekday, days) # Triggers process time function timed", "{timed.new_time_joined} {timed.end_period[-1]}' if timed.new_weekday: new_time += f'- {timed.new_weekday} -' if", "return new_time print('---'*30) x = add_time('10:00 AM', '54:00', 'Monday') print(x)", "= (current_hour + end_hour, current_minute + end_minute) # Calculates Total", "'0')] new_time_joined = ''.join(new_time_array) end_period = [period] # Clock, calculates", "def process_output(): new_time = f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}'", "timed.days == 1 and 
(period != timed.end_period or timed.end_period ==", "{timed.new_weekday} -' if timed.days == 1 and (period != timed.end_period", "new_time print('---'*30) x = add_time('10:00 AM', '54:00', 'Monday') print(x) print('---'*30)", "End Time Total end_hours, end_mins = (current_hour + end_hour, current_minute", "f' -Total days: {timed.days}- <<' return new_time new_time = process_output()", "is >>> {timed.new_time_joined} {timed.end_period[-1]}' if timed.new_weekday: new_time += f'- {timed.new_weekday}", "// 60), ':', str(end_mins % 60).rjust(2, '0')] new_time_joined = ''.join(new_time_array)", "process time function timed = process_time() def process_output(): new_time =", "is AM or PM for i in range(clock): if end_period[-1].lower()", "if timed.new_weekday: new_time += f'- {timed.new_weekday} -' if timed.days ==", "def add_time(start, duration, start_weekday=None): weekdays = [ 'Monday', 'Tuesday', 'Wednesday',", "+= f' -Total days: {timed.days}- <<' return new_time new_time =", "namedtuple MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days') def add_time(start,", "PM for i in range(clock): if end_period[-1].lower() == 'am': end_period.append('PM')", "% 7) % 7] else: new_weekday = False # Figure", "or timed.end_period == 'AM'): new_time += ' (new_day)' elif timed.days", "end_minute) # Calculates Total days passed days = int(end_hours/24) #", "time function timed = process_time() def process_output(): new_time = f'New", "days passed days = int(end_hours/24) # Calculates New Time new_time_array", "# Adds Current time plus End Time Total end_hours, end_mins", "= process_time() def process_output(): new_time = f'New Time is >>>", "] start_time, period = start.split(' ') def process_time(): current_hour, current_minute", "for i in range(clock): if end_period[-1].lower() == 'am': end_period.append('PM') else:", "time plus End Time Total end_hours, end_mins = (current_hour +", "return MainTimer(new_time_joined, end_period, new_weekday, days) 
# Triggers process time function", "start_day_idx = weekdays.index(start_weekday.title()) new_weekday = weekdays[(start_day_idx + days % 7)", "Total days passed days = int(end_hours/24) # Calculates New Time", "current_minute = ([int(t) for t in start_time.split(':')]) end_hour, end_minute =", "[period] # Clock, calculates the days elapsed clock = end_hours", "Clock, calculates the days elapsed clock = end_hours // 12", "end_period[-1].lower() == 'am': end_period.append('PM') else: end_period.append('AM') return MainTimer(new_time_joined, end_period, new_weekday,", "(new_day)' elif timed.days > 1: new_time += f' -Total days:", "whether is AM or PM for i in range(clock): if", "the days elapsed clock = end_hours // 12 if start_weekday:", "= [str(end_hours % 12 + end_mins // 60), ':', str(end_mins", "if timed.days == 1 and (period != timed.end_period or timed.end_period", "// 12 if start_weekday: start_day_idx = weekdays.index(start_weekday.title()) new_weekday = weekdays[(start_day_idx", "{timed.end_period[-1]}' if timed.new_weekday: new_time += f'- {timed.new_weekday} -' if timed.days", "start_time, period = start.split(' ') def process_time(): current_hour, current_minute =", "end_hours // 12 if start_weekday: start_day_idx = weekdays.index(start_weekday.title()) new_weekday =", "new_weekday, days') def add_time(start, duration, start_weekday=None): weekdays = [ 'Monday',", "Time new_time_array = [str(end_hours % 12 + end_mins // 60),", "duration, start_weekday=None): weekdays = [ 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',", "process_time() def process_output(): new_time = f'New Time is >>> {timed.new_time_joined}", "= namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days') def add_time(start, duration, start_weekday=None):", "timed.days > 1: new_time += f' -Total days: {timed.days}- <<'", "new_time_array = [str(end_hours % 12 + end_mins // 60), ':',", "') def process_time(): current_hour, current_minute = ([int(t) for t in", "= 
int(end_hours/24) # Calculates New Time new_time_array = [str(end_hours %", "= ([int(t) for t in start_time.split(':')]) end_hour, end_minute = ([int(d)", "# Calculates New Time new_time_array = [str(end_hours % 12 +", "'am': end_period.append('PM') else: end_period.append('AM') return MainTimer(new_time_joined, end_period, new_weekday, days) #", "and (period != timed.end_period or timed.end_period == 'AM'): new_time +=", "Current time plus End Time Total end_hours, end_mins = (current_hour", "== 'am': end_period.append('PM') else: end_period.append('AM') return MainTimer(new_time_joined, end_period, new_weekday, days)", "process_output(): new_time = f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}' if", "timed = process_time() def process_output(): new_time = f'New Time is", "1: new_time += f' -Total days: {timed.days}- <<' return new_time", "= weekdays.index(start_weekday.title()) new_weekday = weekdays[(start_day_idx + days % 7) %", "import namedtuple MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days') def", "clock = end_hours // 12 if start_weekday: start_day_idx = weekdays.index(start_weekday.title())", "weekdays.index(start_weekday.title()) new_weekday = weekdays[(start_day_idx + days % 7) % 7]", "+ end_minute) # Calculates Total days passed days = int(end_hours/24)", "12 if start_weekday: start_day_idx = weekdays.index(start_weekday.title()) new_weekday = weekdays[(start_day_idx +", "process_time(): current_hour, current_minute = ([int(t) for t in start_time.split(':')]) end_hour,", "Time Total end_hours, end_mins = (current_hour + end_hour, current_minute +" ]
[ "from .search_interface import SearchInterface from .detail_interface import DetailInterface from .user_interface", ".analyze_logs import AnalyzeLogs from .search_interface import SearchInterface from .detail_interface import", "import AnalyzeLogs from .search_interface import SearchInterface from .detail_interface import DetailInterface", "AnalyzeLogs from .search_interface import SearchInterface from .detail_interface import DetailInterface from", ".search_interface import SearchInterface from .detail_interface import DetailInterface from .user_interface import", "SearchInterface from .detail_interface import DetailInterface from .user_interface import UserInterface from", ".detail_interface import DetailInterface from .user_interface import UserInterface from .visualize_log_detail import", "import DetailInterface from .user_interface import UserInterface from .visualize_log_detail import VisualizeLogDetail", "from .analyze_logs import AnalyzeLogs from .search_interface import SearchInterface from .detail_interface", "import SearchInterface from .detail_interface import DetailInterface from .user_interface import UserInterface", "from .detail_interface import DetailInterface from .user_interface import UserInterface from .visualize_log_detail" ]
[ "= np.ones(N) x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX = DFT.dftAnal(x, w,", "64 w = np.ones(N) x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX =", "DFT import math k0 = 8.5 N = 64 w", "plt.title('inverse spectrum: IDFT(X)') plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5) plt.axis([-N/2,N/2-1,min(y), max(y)]) plt.tight_layout()", "import numpy as np import sys sys.path.append('../../../software/models/') import dftModel as", "N) plt.figure(1, figsize=(9.5, 5)) plt.subplot(311) plt.title('positive freq. magnitude spectrum in", "dB: mX') plt.plot(np.arange(mX.size), mX, 'r', lw=1.5) plt.axis([0,mX.size, min(mX), max(mX)+1]) plt.subplot(312)", "DFT.dftSynth(mX, pX, N) plt.figure(1, figsize=(9.5, 5)) plt.subplot(311) plt.title('positive freq. magnitude", "pX') plt.plot(np.arange(pX.size), pX, 'c', lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse spectrum:", "dftModel as DFT import math k0 = 8.5 N =", "import sys sys.path.append('../../../software/models/') import dftModel as DFT import math k0", "plt.title('positive freq. phase spectrum: pX') plt.plot(np.arange(pX.size), pX, 'c', lw=1.5) plt.axis([0,", "pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse spectrum: IDFT(X)') plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5) plt.axis([-N/2,N/2-1,min(y),", "import matplotlib.pyplot as plt import numpy as np import sys", "= DFT.dftSynth(mX, pX, N) plt.figure(1, figsize=(9.5, 5)) plt.subplot(311) plt.title('positive freq.", "w = np.ones(N) x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX = DFT.dftAnal(x,", "sys.path.append('../../../software/models/') import dftModel as DFT import math k0 = 8.5", "max(mX)+1]) plt.subplot(312) plt.title('positive freq. 
phase spectrum: pX') plt.plot(np.arange(pX.size), pX, 'c',", "matplotlib.pyplot as plt import numpy as np import sys sys.path.append('../../../software/models/')", "x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX = DFT.dftAnal(x, w, N) y", "N) y = DFT.dftSynth(mX, pX, N) plt.figure(1, figsize=(9.5, 5)) plt.subplot(311)", "spectrum: pX') plt.plot(np.arange(pX.size), pX, 'c', lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse", "= DFT.dftAnal(x, w, N) y = DFT.dftSynth(mX, pX, N) plt.figure(1,", "numpy as np import sys sys.path.append('../../../software/models/') import dftModel as DFT", "mX, pX = DFT.dftAnal(x, w, N) y = DFT.dftSynth(mX, pX,", "mX, 'r', lw=1.5) plt.axis([0,mX.size, min(mX), max(mX)+1]) plt.subplot(312) plt.title('positive freq. phase", "min(mX), max(mX)+1]) plt.subplot(312) plt.title('positive freq. phase spectrum: pX') plt.plot(np.arange(pX.size), pX,", "magnitude spectrum in dB: mX') plt.plot(np.arange(mX.size), mX, 'r', lw=1.5) plt.axis([0,mX.size,", "plt.figure(1, figsize=(9.5, 5)) plt.subplot(311) plt.title('positive freq. magnitude spectrum in dB:", "in dB: mX') plt.plot(np.arange(mX.size), mX, 'r', lw=1.5) plt.axis([0,mX.size, min(mX), max(mX)+1])", "'r', lw=1.5) plt.axis([0,mX.size, min(mX), max(mX)+1]) plt.subplot(312) plt.title('positive freq. phase spectrum:", "plt import numpy as np import sys sys.path.append('../../../software/models/') import dftModel", "IDFT(X)') plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5) plt.axis([-N/2,N/2-1,min(y), max(y)]) plt.tight_layout() plt.savefig('idft.png') plt.show()", "freq. magnitude spectrum in dB: mX') plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)", "plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse spectrum: IDFT(X)') plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5)", "plt.title('positive freq. magnitude spectrum in dB: mX') plt.plot(np.arange(mX.size), mX, 'r',", "5)) plt.subplot(311) plt.title('positive freq. 
magnitude spectrum in dB: mX') plt.plot(np.arange(mX.size),", "plt.plot(np.arange(pX.size), pX, 'c', lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse spectrum: IDFT(X)')", "plt.subplot(313) plt.title('inverse spectrum: IDFT(X)') plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5) plt.axis([-N/2,N/2-1,min(y), max(y)])", "DFT.dftAnal(x, w, N) y = DFT.dftSynth(mX, pX, N) plt.figure(1, figsize=(9.5,", "plt.axis([0,mX.size, min(mX), max(mX)+1]) plt.subplot(312) plt.title('positive freq. phase spectrum: pX') plt.plot(np.arange(pX.size),", "lw=1.5) plt.axis([0,mX.size, min(mX), max(mX)+1]) plt.subplot(312) plt.title('positive freq. phase spectrum: pX')", "np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX = DFT.dftAnal(x, w, N) y = DFT.dftSynth(mX,", "plt.subplot(312) plt.title('positive freq. phase spectrum: pX') plt.plot(np.arange(pX.size), pX, 'c', lw=1.5)", "import dftModel as DFT import math k0 = 8.5 N", "plt.subplot(311) plt.title('positive freq. magnitude spectrum in dB: mX') plt.plot(np.arange(mX.size), mX,", "mX') plt.plot(np.arange(mX.size), mX, 'r', lw=1.5) plt.axis([0,mX.size, min(mX), max(mX)+1]) plt.subplot(312) plt.title('positive", "as DFT import math k0 = 8.5 N = 64", "phase spectrum: pX') plt.plot(np.arange(pX.size), pX, 'c', lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313)", "spectrum: IDFT(X)') plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5) plt.axis([-N/2,N/2-1,min(y), max(y)]) plt.tight_layout() plt.savefig('idft.png')", "as np import sys sys.path.append('../../../software/models/') import dftModel as DFT import", "plt.plot(np.arange(mX.size), mX, 'r', lw=1.5) plt.axis([0,mX.size, min(mX), max(mX)+1]) plt.subplot(312) plt.title('positive freq.", "figsize=(9.5, 5)) plt.subplot(311) plt.title('positive freq. magnitude spectrum in dB: mX')", "freq. 
phase spectrum: pX') plt.plot(np.arange(pX.size), pX, 'c', lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi])", "spectrum in dB: mX') plt.plot(np.arange(mX.size), mX, 'r', lw=1.5) plt.axis([0,mX.size, min(mX),", "as plt import numpy as np import sys sys.path.append('../../../software/models/') import", "= np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX = DFT.dftAnal(x, w, N) y =", "y = DFT.dftSynth(mX, pX, N) plt.figure(1, figsize=(9.5, 5)) plt.subplot(311) plt.title('positive", "'c', lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse spectrum: IDFT(X)') plt.plot(np.arange(-N/2, N/2),", "np import sys sys.path.append('../../../software/models/') import dftModel as DFT import math", "sys sys.path.append('../../../software/models/') import dftModel as DFT import math k0 =", "k0 = 8.5 N = 64 w = np.ones(N) x", "pX = DFT.dftAnal(x, w, N) y = DFT.dftSynth(mX, pX, N)", "pX, N) plt.figure(1, figsize=(9.5, 5)) plt.subplot(311) plt.title('positive freq. magnitude spectrum", "= 64 w = np.ones(N) x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX", "8.5 N = 64 w = np.ones(N) x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2))", "import math k0 = 8.5 N = 64 w =", "= 8.5 N = 64 w = np.ones(N) x =", "np.ones(N) x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX = DFT.dftAnal(x, w, N)", "math k0 = 8.5 N = 64 w = np.ones(N)", "w, N) y = DFT.dftSynth(mX, pX, N) plt.figure(1, figsize=(9.5, 5))", "N = 64 w = np.ones(N) x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX,", "pX, 'c', lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse spectrum: IDFT(X)') plt.plot(np.arange(-N/2,", "lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse spectrum: IDFT(X)') plt.plot(np.arange(-N/2, N/2), y,'b'," ]
[ "Development :: Documentation\", \"Topic :: Office/Business\", \"Topic :: Text Processing", "import setup, find_packages setup( name='typobs', version='0.0.3', entry_points={ 'console_scripts': [ 'to_obsidian=to_obsidian:run',", "[ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ] }, packages=find_packages(), # metadata to display", "Code\": \"https://github.com/jerzydziewierz/typobs\", }, classifiers=[ \"Programming Language :: Python\", \"Topic ::", "as described in: # https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to install on your", "\"Environment :: Console\", \"License :: OSI Approved :: Apache Software", "Console\", \"License :: OSI Approved :: Apache Software License\", ]", "# > pip install -e . from setuptools import setup,", ":: Text Processing :: Markup\", \"Development Status :: 5 -", "\"Development Status :: 5 - Production/Stable\", \"Environment :: Console\", \"License", "metadata to display on PyPI author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Convert between Typora", "packages=find_packages(), # metadata to display on PyPI author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Convert", "install -e . 
from setuptools import setup, find_packages setup( name='typobs',", "<gh_stars>0 # setup.py as described in: # https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to", "if any project_urls={ \"Bug Tracker\": \"https://github.com/jerzydziewierz/typobs\", \"Documentation\": \"https://github.com/jerzydziewierz/typobs\", \"Source Code\":", "classifiers=[ \"Programming Language :: Python\", \"Topic :: Documentation\", \"Topic ::", ":: Console\", \"License :: OSI Approved :: Apache Software License\",", "\"Programming Language :: Python\", \"Topic :: Documentation\", \"Topic :: Software", "'to_typora=to_typora:run', ] }, packages=find_packages(), # metadata to display on PyPI", "Documentation\", \"Topic :: Software Development :: Documentation\", \"Topic :: Office/Business\",", "'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ] }, packages=find_packages(), # metadata to display on", "your system, run: # > pip install -e . from", "run: # > pip install -e . from setuptools import", "-e . 
from setuptools import setup, find_packages setup( name='typobs', version='0.0.3',", "on your system, run: # > pip install -e .", "Markup\", \"Development Status :: 5 - Production/Stable\", \"Environment :: Console\",", "Tracker\": \"https://github.com/jerzydziewierz/typobs\", \"Documentation\": \"https://github.com/jerzydziewierz/typobs\", \"Source Code\": \"https://github.com/jerzydziewierz/typobs\", }, classifiers=[ \"Programming", "Production/Stable\", \"Environment :: Console\", \"License :: OSI Approved :: Apache", "Obsidian link styles\", keywords=\"Typora Obsidian Markdown link converter\", url=\"https://github.com/jerzydziewierz/typobs\", #", "}, packages=find_packages(), # metadata to display on PyPI author=\"<NAME>\", author_email=\"<EMAIL>\",", "\"https://github.com/jerzydziewierz/typobs\", \"Source Code\": \"https://github.com/jerzydziewierz/typobs\", }, classifiers=[ \"Programming Language :: Python\",", "\"Topic :: Office/Business\", \"Topic :: Text Processing :: Filters\", \"Topic", "project home page, if any project_urls={ \"Bug Tracker\": \"https://github.com/jerzydziewierz/typobs\", \"Documentation\":", "setup( name='typobs', version='0.0.3', entry_points={ 'console_scripts': [ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ] },", ". 
from setuptools import setup, find_packages setup( name='typobs', version='0.0.3', entry_points={", "Markdown link converter\", url=\"https://github.com/jerzydziewierz/typobs\", # project home page, if any", "name='typobs', version='0.0.3', entry_points={ 'console_scripts': [ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ] }, packages=find_packages(),", ":: Documentation\", \"Topic :: Software Development :: Documentation\", \"Topic ::", "PyPI author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Convert between Typora and Obsidian link styles\",", "Office/Business\", \"Topic :: Text Processing :: Filters\", \"Topic :: Text", "on PyPI author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Convert between Typora and Obsidian link", "# https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to install on your system, run: #", "link converter\", url=\"https://github.com/jerzydziewierz/typobs\", # project home page, if any project_urls={", "\"https://github.com/jerzydziewierz/typobs\", \"Documentation\": \"https://github.com/jerzydziewierz/typobs\", \"Source Code\": \"https://github.com/jerzydziewierz/typobs\", }, classifiers=[ \"Programming Language", "\"Topic :: Text Processing :: Filters\", \"Topic :: Text Processing", ":: Filters\", \"Topic :: Text Processing :: Markup\", \"Development Status", "from setuptools import setup, find_packages setup( name='typobs', version='0.0.3', entry_points={ 'console_scripts':", ":: Text Processing :: Filters\", \"Topic :: Text Processing ::", "converter\", url=\"https://github.com/jerzydziewierz/typobs\", # project home page, if any project_urls={ \"Bug", "\"Bug Tracker\": \"https://github.com/jerzydziewierz/typobs\", \"Documentation\": \"https://github.com/jerzydziewierz/typobs\", \"Source Code\": \"https://github.com/jerzydziewierz/typobs\", }, classifiers=[", "styles\", keywords=\"Typora Obsidian Markdown link converter\", url=\"https://github.com/jerzydziewierz/typobs\", # 
project home", "keywords=\"Typora Obsidian Markdown link converter\", url=\"https://github.com/jerzydziewierz/typobs\", # project home page,", "Processing :: Markup\", \"Development Status :: 5 - Production/Stable\", \"Environment", "find_packages setup( name='typobs', version='0.0.3', entry_points={ 'console_scripts': [ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ]", "Text Processing :: Markup\", \"Development Status :: 5 - Production/Stable\",", "\"Topic :: Text Processing :: Markup\", \"Development Status :: 5", "url=\"https://github.com/jerzydziewierz/typobs\", # project home page, if any project_urls={ \"Bug Tracker\":", "version='0.0.3', entry_points={ 'console_scripts': [ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ] }, packages=find_packages(), #", "# metadata to display on PyPI author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Convert between", "Documentation\", \"Topic :: Office/Business\", \"Topic :: Text Processing :: Filters\",", "# project home page, if any project_urls={ \"Bug Tracker\": \"https://github.com/jerzydziewierz/typobs\",", "\"Documentation\": \"https://github.com/jerzydziewierz/typobs\", \"Source Code\": \"https://github.com/jerzydziewierz/typobs\", }, classifiers=[ \"Programming Language ::", "5 - Production/Stable\", \"Environment :: Console\", \"License :: OSI Approved", "Language :: Python\", \"Topic :: Documentation\", \"Topic :: Software Development", ":: Documentation\", \"Topic :: Office/Business\", \"Topic :: Text Processing ::", "Software Development :: Documentation\", \"Topic :: Office/Business\", \"Topic :: Text", "project_urls={ \"Bug Tracker\": \"https://github.com/jerzydziewierz/typobs\", \"Documentation\": \"https://github.com/jerzydziewierz/typobs\", \"Source Code\": \"https://github.com/jerzydziewierz/typobs\", },", "setuptools import setup, find_packages setup( name='typobs', version='0.0.3', entry_points={ 'console_scripts': [", ":: Python\", \"Topic :: Documentation\", \"Topic :: 
Software Development ::", "\"https://github.com/jerzydziewierz/typobs\", }, classifiers=[ \"Programming Language :: Python\", \"Topic :: Documentation\",", "display on PyPI author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Convert between Typora and Obsidian", "setup.py as described in: # https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to install on", "# setup.py as described in: # https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to install", "\"Source Code\": \"https://github.com/jerzydziewierz/typobs\", }, classifiers=[ \"Programming Language :: Python\", \"Topic", "\"License :: OSI Approved :: Apache Software License\", ] )", "\"Topic :: Software Development :: Documentation\", \"Topic :: Office/Business\", \"Topic", "Python\", \"Topic :: Documentation\", \"Topic :: Software Development :: Documentation\",", ":: 5 - Production/Stable\", \"Environment :: Console\", \"License :: OSI", "] }, packages=find_packages(), # metadata to display on PyPI author=\"<NAME>\",", "author_email=\"<EMAIL>\", description=\"Convert between Typora and Obsidian link styles\", keywords=\"Typora Obsidian", "# to install on your system, run: # > pip", "and Obsidian link styles\", keywords=\"Typora Obsidian Markdown link converter\", url=\"https://github.com/jerzydziewierz/typobs\",", "}, classifiers=[ \"Programming Language :: Python\", \"Topic :: Documentation\", \"Topic", "pip install -e . 
from setuptools import setup, find_packages setup(", "described in: # https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to install on your system,", "setup, find_packages setup( name='typobs', version='0.0.3', entry_points={ 'console_scripts': [ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run',", "page, if any project_urls={ \"Bug Tracker\": \"https://github.com/jerzydziewierz/typobs\", \"Documentation\": \"https://github.com/jerzydziewierz/typobs\", \"Source", "https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to install on your system, run: # >", "Filters\", \"Topic :: Text Processing :: Markup\", \"Development Status ::", "> pip install -e . from setuptools import setup, find_packages", "entry_points={ 'console_scripts': [ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ] }, packages=find_packages(), # metadata", "system, run: # > pip install -e . from setuptools", "Status :: 5 - Production/Stable\", \"Environment :: Console\", \"License ::", "'console_scripts': [ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ] }, packages=find_packages(), # metadata to", "link styles\", keywords=\"Typora Obsidian Markdown link converter\", url=\"https://github.com/jerzydziewierz/typobs\", # project", "Processing :: Filters\", \"Topic :: Text Processing :: Markup\", \"Development", "install on your system, run: # > pip install -e", "home page, if any project_urls={ \"Bug Tracker\": \"https://github.com/jerzydziewierz/typobs\", \"Documentation\": \"https://github.com/jerzydziewierz/typobs\",", "Text Processing :: Filters\", \"Topic :: Text Processing :: Markup\",", ":: Markup\", \"Development Status :: 5 - Production/Stable\", \"Environment ::", "Typora and Obsidian link styles\", keywords=\"Typora Obsidian Markdown link converter\",", "to display on PyPI author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Convert between Typora and", "Obsidian Markdown link 
converter\", url=\"https://github.com/jerzydziewierz/typobs\", # project home page, if", ":: Office/Business\", \"Topic :: Text Processing :: Filters\", \"Topic ::", "any project_urls={ \"Bug Tracker\": \"https://github.com/jerzydziewierz/typobs\", \"Documentation\": \"https://github.com/jerzydziewierz/typobs\", \"Source Code\": \"https://github.com/jerzydziewierz/typobs\",", "description=\"Convert between Typora and Obsidian link styles\", keywords=\"Typora Obsidian Markdown", ":: Software Development :: Documentation\", \"Topic :: Office/Business\", \"Topic ::", "in: # https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to install on your system, run:", "author=\"<NAME>\", author_email=\"<EMAIL>\", description=\"Convert between Typora and Obsidian link styles\", keywords=\"Typora", "- Production/Stable\", \"Environment :: Console\", \"License :: OSI Approved ::", "\"Topic :: Documentation\", \"Topic :: Software Development :: Documentation\", \"Topic", "between Typora and Obsidian link styles\", keywords=\"Typora Obsidian Markdown link", "to install on your system, run: # > pip install" ]
[ "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } BASELINE_ONE_LOAD = { \"baseline_facts\": [", "None, \"subscription_manager_id\": \"RHN Classic and Red Hat Subscription Management\", \"system_profile\":", "\"<KEY>\" \"<KEY> } AUTH_HEADER_NO_ENTITLEMENTS = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\"", "{ \"display_name\": \"created_from_inventory\", \"inventory_uuid\": \"df925152-c45d-11e9-a1f0-c85b761454fa\", } SYSTEM_WITH_PROFILE = { \"account\":", "} } \"entitlements\": { \"smart_management\": { \"is_entitled\": true } }", "\"display_name\": \"arch baseline\", } BASELINE_TWO_LOAD = { \"baseline_facts\": [ {\"name\":", "AUTH_HEADER = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "for readablity): { \"identity\": { \"internal\": { \"org_id\": \"9999\" },", "Management\", \"system_profile\": { \"salutation\": \"hi\", \"system_profile_exists\": False, \"installed_packages\": [ \"openssl-1.1.1c-2.fc30.x86_64\",", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\" \"fX0K\" } \"\"\" decoded AUTH_HEADER_NO_ACCT (newlines", "[ {\"name\": \"arch\", \"value\": \"x86_64\"}, {\"name\": \"phony.arch.fact\", \"value\": \"some value\"},", "\"No\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\": false, \"last_name\": \"Number\", \"locale\":", "true, \"is_org_admin\": false, \"last_name\": \"Number\", \"locale\": \"en_US\", \"username\": \"nonumber\" }", "} SYSTEM_WITH_PROFILE = { \"account\": \"9876543\", \"bios_uuid\": \"e380fd4a-28ae-11e9-974c-c85b761454fb\", \"created\": \"2018-01-31T13:00:00.100010Z\",", "\"00000000-28af-11e9-9ab0-c85b761454fa\", \"ip_addresses\": [\"10.0.0.3\", \"fc00:db20:35b:7399::5\"], \"mac_addresses\": [\"52:54:00:cd:ae:00\", \"00:00:00:00:00:00\"], \"rhel_machine_id\": None, \"satellite_id\":", "\"values\": [ {\"name\": \"nested_one\", \"value\": \"one\"}, {\"name\": \"nested_two\", \"value\": \"two\"},", "], \"display_name\": \"cpu + mem baseline\", } BASELINE_PARTIAL_ONE = 
{\"baseline_facts\":", "\"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"No\", \"is_active\": true, \"is_internal\": true,", "+ mem baseline\", } BASELINE_THREE_LOAD = { \"baseline_facts\": [ {\"name\":", "}, \"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"Firstname\", \"is_active\":", "\"Lastname\", \"locale\": \"en_US\", \"username\": \"test_username\" } } \"entitlements\": { \"smart_management\":", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_SMART_MGMT_FALSE", "None, \"satellite_id\": None, \"subscription_manager_id\": \"RHN Classic and Red Hat Subscription", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\" \"<KEY>\" \"<KEY>l\" \"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\" \"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\"", "in real life, adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { \"X-RH-IDENTITY\":", "readablity): { \"identity\": { \"internal\": { \"org_id\": \"9999\" }, \"type\":", "\"ip_addresses\": [\"10.0.0.3\", \"fc00:db20:35b:7399::5\"], \"mac_addresses\": [\"52:54:00:cd:ae:00\", \"00:00:00:00:00:00\"], \"rhel_machine_id\": None, \"satellite_id\": None,", "\"\"\" AUTH_HEADER_NO_ACCT = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "mem baseline\", } BASELINE_PARTIAL_ONE = {\"baseline_facts\": [{\"name\": \"hello\", \"value\": \"world\"}]}", "\"Number\", \"locale\": \"en_US\", \"username\": \"nonumber\" } } } \"\"\" AUTH_HEADER_NO_ACCT", "\"display_name\": \"created_from_inventory\", \"inventory_uuid\": \"df925152-c45d-11e9-a1f0-c85b761454fa\", } SYSTEM_WITH_PROFILE = { \"account\": \"9876543\",", "\"Firstname\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\": false, \"last_name\": \"Lastname\", \"locale\":", "\"entitlements\": { \"smart_management\": { \"is_entitled\": true } } } \"\"\"", "} } \"\"\" AUTH_HEADER = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\"", "[ 
\"openssl-1.1.1c-2.fc30.x86_64\", \"python2-libs-2.7.16-2.fc30.x86_64\", ], \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", }, \"tags\": [], \"updated\":", "(newlines added for readablity): { \"identity\": { \"internal\": { \"org_id\":", "AUTH_HEADER_NO_ACCT = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "can't happen in real life, adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS =", "[{\"name\": \"cpu_sockets\", \"value\": \"16\"}]} ], \"display_name\": \"cpu + mem baseline\",", "\"16\"}, ], \"display_name\": \"cpu + mem baseline\", } BASELINE_THREE_LOAD =", "\"installed_packages\": [ \"openssl-1.1.1c-2.fc30.x86_64\", \"python2-libs-2.7.16-2.fc30.x86_64\", ], \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", }, \"tags\": [],", "{ \"is_entitled\": true } } } \"\"\" AUTH_HEADER = {", "\"satellite_id\": None, \"subscription_manager_id\": \"RHN Classic and Red Hat Subscription Management\",", "\"phony.arch.fact\", \"value\": \"some value\"}, ], \"display_name\": \"arch baseline\", } BASELINE_TWO_LOAD", "\"64GB\"}, {\"name\": \"cpu_sockets\", \"value\": \"16\"}, ], \"display_name\": \"cpu + mem", "\"df925152-c45d-11e9-a1f0-c85b761454fa\", } SYSTEM_WITH_PROFILE = { \"account\": \"9876543\", \"bios_uuid\": \"e380fd4a-28ae-11e9-974c-c85b761454fb\", \"created\":", "\"value\": \"16\"}]} ], \"display_name\": \"cpu + mem baseline\", } BASELINE_PARTIAL_ONE", "} AUTH_HEADER_NO_ENTITLEMENTS = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "}, \"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"No\", \"is_active\":", "\"value\": \"64GB\"}, {\"name\": \"cpu_sockets\", \"value\": \"16\"}, ], \"display_name\": \"cpu +", "CREATE_FROM_INVENTORY = { \"display_name\": \"created_from_inventory\", \"inventory_uuid\": \"df925152-c45d-11e9-a1f0-c85b761454fa\", } SYSTEM_WITH_PROFILE =", "\"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", 
"\"inventory_uuid\": \"df925152-c45d-11e9-a1f0-c85b761454fa\", } SYSTEM_WITH_PROFILE = { \"account\": \"9876543\", \"bios_uuid\": \"e380fd4a-28ae-11e9-974c-c85b761454fb\",", "\"<KEY> } AUTH_HEADER_NO_ENTITLEMENTS = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"system_profile_exists\": False, \"installed_packages\": [ \"openssl-1.1.1c-2.fc30.x86_64\", \"python2-libs-2.7.16-2.fc30.x86_64\", ], \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", },", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_SMART_MGMT_FALSE = { \"X-RH-IDENTITY\": \"<KEY>\"", "\"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"Firstname\", \"is_active\": true, \"is_internal\":", "\"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"No\", \"is_active\": true, \"is_internal\":", "\"org_id\": \"5678\" }, \"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\":", "false, \"last_name\": \"Number\", \"locale\": \"en_US\", \"username\": \"nonumber\" } } }", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> }", "\"created_from_inventory\", \"inventory_uuid\": \"df925152-c45d-11e9-a1f0-c85b761454fa\", } SYSTEM_WITH_PROFILE = { \"account\": \"9876543\", \"bios_uuid\":", "\"<KEY>f\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\"", "\"ABCDE\", \"baseline_facts\": [ { \"name\": \"hello\", \"values\": [ {\"name\": \"nested_one\",", "\"hi\", \"system_profile_exists\": False, \"installed_packages\": [ \"openssl-1.1.1c-2.fc30.x86_64\", \"python2-libs-2.7.16-2.fc30.x86_64\", ], \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\",", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } BASELINE_ONE_LOAD = { \"baseline_facts\":", "AUTH_HEADER_NO_ENTITLEMENTS = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "readability): { \"identity\": { \"account_number\": \"1234\", \"internal\": { \"org_id\": \"5678\"", 
"\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_NO_ENTITLEMENTS", "decoded AUTH_HEADER_NO_ACCT (newlines added for readablity): { \"identity\": { \"internal\":", "\"two\"}, ], } ], } BASELINE_PARTIAL_CONFLICT = {\"display_name\": \"arch baseline\"}", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_NO_ENTITLEMENTS = {", "\"display_name\": \"ABCDE\", \"baseline_facts\": [ { \"name\": \"hello\", \"values\": [ {\"name\":", "\"16\"}]} ], \"display_name\": \"cpu + mem baseline\", } BASELINE_PARTIAL_ONE =", "\"identity\": { \"account_number\": \"1234\", \"internal\": { \"org_id\": \"5678\" }, \"type\":", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\" \"fX0K\" }", "} ], } BASELINE_PARTIAL_CONFLICT = {\"display_name\": \"arch baseline\"} CREATE_FROM_INVENTORY =", "\"org_id\": \"9999\" }, \"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\":", "\"email\": \"<EMAIL>\", \"first_name\": \"Firstname\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\": false,", "{ \"baseline_facts\": [ {\"name\": \"arch\", \"value\": \"x86_64\"}, {\"name\": \"phony.arch.fact\", \"value\":", "\"one\"}, {\"name\": \"nested_two\", \"value\": \"two\"}, ], } ], } BASELINE_PARTIAL_CONFLICT", "real life, adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { \"X-RH-IDENTITY\": \"<KEY>f\"", "\"email\": \"<EMAIL>\", \"first_name\": \"No\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\": false,", "\"<KEY>\" \"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\" \"<KEY>\" \"<KEY>l\" \"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\" \"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\" } # this can't", "\"baseline_facts\": [ {\"name\": \"memory\", \"value\": \"64GB\"}, {\"name\": \"cpu_sockets\", \"value\": \"16\"},", "for readability): { \"identity\": { \"account_number\": \"1234\", \"internal\": { \"org_id\":", "\"\"\" decoded AUTH_HEADER_NO_ACCT 
(newlines added for readablity): { \"identity\": {", "[ {\"name\": \"nested\", \"values\": [{\"name\": \"cpu_sockets\", \"value\": \"16\"}]} ], \"display_name\":", "\"locale\": \"en_US\", \"username\": \"test_username\" } } \"entitlements\": { \"smart_management\": {", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>", "], } ], } BASELINE_PARTIAL_CONFLICT = {\"display_name\": \"arch baseline\"} CREATE_FROM_INVENTORY", "} BASELINE_PARTIAL_CONFLICT = {\"display_name\": \"arch baseline\"} CREATE_FROM_INVENTORY = { \"display_name\":", "{\"name\": \"memory\", \"value\": \"64GB\"}, {\"name\": \"cpu_sockets\", \"value\": \"16\"}, ], \"display_name\":", "= { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>", "Classic and Red Hat Subscription Management\", \"system_profile\": { \"salutation\": \"hi\",", "\"value\": \"some value\"}, ], \"display_name\": \"arch baseline\", } BASELINE_TWO_LOAD =", "\"<KEY>\" \"<KEY>\" \"<KEY> } BASELINE_ONE_LOAD = { \"baseline_facts\": [ {\"name\":", "{\"name\": \"nested_two\", \"value\": \"two\"}, ], } ], } BASELINE_PARTIAL_CONFLICT =", "and Red Hat Subscription Management\", \"system_profile\": { \"salutation\": \"hi\", \"system_profile_exists\":", "\"X-RH-IDENTITY\": \"<KEY>f\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"mac_addresses\": [\"52:54:00:cd:ae:00\", \"00:00:00:00:00:00\"], \"rhel_machine_id\": None, \"satellite_id\": None, \"subscription_manager_id\": \"RHN Classic", "\"<KEY>\" \"<KEY>\" \"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\" \"<KEY>\" \"<KEY>l\" \"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\" \"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\" } # this", "} BASELINE_THREE_LOAD = { \"baseline_facts\": [ {\"name\": \"nested\", \"values\": [{\"name\":", "= { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"nested_two\", \"value\": \"two\"}, ], } ], } 
BASELINE_PARTIAL_CONFLICT = {\"display_name\":", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_NO_ENTITLEMENTS = { \"X-RH-IDENTITY\": \"<KEY>\"", "{ \"identity\": { \"internal\": { \"org_id\": \"9999\" }, \"type\": \"User\",", "None, \"fqdn\": None, \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", \"insights_id\": \"00000000-28af-11e9-9ab0-c85b761454fa\", \"ip_addresses\": [\"10.0.0.3\", \"fc00:db20:35b:7399::5\"],", "\"insights_id\": \"00000000-28af-11e9-9ab0-c85b761454fa\", \"ip_addresses\": [\"10.0.0.3\", \"fc00:db20:35b:7399::5\"], \"mac_addresses\": [\"52:54:00:cd:ae:00\", \"00:00:00:00:00:00\"], \"rhel_machine_id\": None,", "[ {\"name\": \"memory\", \"value\": \"64GB\"}, {\"name\": \"cpu_sockets\", \"value\": \"16\"}, ],", "{ \"salutation\": \"hi\", \"system_profile_exists\": False, \"installed_packages\": [ \"openssl-1.1.1c-2.fc30.x86_64\", \"python2-libs-2.7.16-2.fc30.x86_64\", ],", "\"9876543\", \"bios_uuid\": \"e380fd4a-28ae-11e9-974c-c85b761454fb\", \"created\": \"2018-01-31T13:00:00.100010Z\", \"display_name\": None, \"fqdn\": None, \"id\":", "\"2018-01-31T13:00:00.100010Z\", \"display_name\": None, \"fqdn\": None, \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", \"insights_id\": \"00000000-28af-11e9-9ab0-c85b761454fa\", \"ip_addresses\":", "{ \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> }", "} BASELINE_TWO_LOAD = { \"baseline_facts\": [ {\"name\": \"memory\", \"value\": \"64GB\"},", "\"smart_management\": { \"is_entitled\": true } } } \"\"\" AUTH_HEADER =", "\"name\": \"hello\", \"values\": [ {\"name\": \"nested_one\", \"value\": \"one\"}, {\"name\": \"nested_two\",", "= { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\"", "\"is_entitled\": true } } } \"\"\" AUTH_HEADER = { \"X-RH-IDENTITY\":", "\"openssl-1.1.1c-2.fc30.x86_64\", \"python2-libs-2.7.16-2.fc30.x86_64\", ], \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", }, \"tags\": 
[], \"updated\": \"2018-01-31T14:00:00.500000Z\",", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_SMART_MGMT_FALSE = {", "{ \"org_id\": \"9999\" }, \"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\",", "\"account\": \"9876543\", \"bios_uuid\": \"e380fd4a-28ae-11e9-974c-c85b761454fb\", \"created\": \"2018-01-31T13:00:00.100010Z\", \"display_name\": None, \"fqdn\": None,", "anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { \"X-RH-IDENTITY\": \"<KEY>f\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"display_name\": \"cpu + mem baseline\", } BASELINE_PARTIAL_ONE = {\"baseline_facts\": [{\"name\":", "\"en_US\", \"username\": \"test_username\" } } \"entitlements\": { \"smart_management\": { \"is_entitled\":", "\"cpu + mem baseline\", } BASELINE_PARTIAL_ONE = {\"baseline_facts\": [{\"name\": \"hello\",", "\"e380fd4a-28ae-11e9-974c-c85b761454fb\", \"created\": \"2018-01-31T13:00:00.100010Z\", \"display_name\": None, \"fqdn\": None, \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", \"insights_id\":", "{ \"identity\": { \"account_number\": \"1234\", \"internal\": { \"org_id\": \"5678\" },", "= {\"baseline_facts\": [{\"name\": \"hello\", \"value\": \"world\"}]} BASELINE_PARTIAL_TWO = { \"display_name\":", "AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { \"X-RH-IDENTITY\": \"<KEY>f\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"fX0K\" } \"\"\" decoded AUTH_HEADER_NO_ACCT (newlines added for readablity): {", "\"value\": \"x86_64\"}, {\"name\": \"phony.arch.fact\", \"value\": \"some value\"}, ], \"display_name\": \"arch", "{ \"email\": \"<EMAIL>\", \"first_name\": \"No\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\":", "None, \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", \"insights_id\": \"00000000-28af-11e9-9ab0-c85b761454fa\", \"ip_addresses\": [\"10.0.0.3\", \"fc00:db20:35b:7399::5\"], \"mac_addresses\": [\"52:54:00:cd:ae:00\",", "\"world\"}]} BASELINE_PARTIAL_TWO = { \"display_name\": \"ABCDE\", \"baseline_facts\": [ { \"name\":", 
"\"<KEY>\" \"<KEY>\" \"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\" \"fX0K\" } \"\"\" decoded AUTH_HEADER_NO_ACCT (newlines added", "], \"display_name\": \"arch baseline\", } BASELINE_TWO_LOAD = { \"baseline_facts\": [", "Hat Subscription Management\", \"system_profile\": { \"salutation\": \"hi\", \"system_profile_exists\": False, \"installed_packages\":", "\"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\" \"fX0K\" } \"\"\" decoded AUTH_HEADER_NO_ACCT (newlines added for readablity):", "= { \"X-RH-IDENTITY\": \"<KEY>f\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "} BASELINE_PARTIAL_ONE = {\"baseline_facts\": [{\"name\": \"hello\", \"value\": \"world\"}]} BASELINE_PARTIAL_TWO =", "\"cpu_sockets\", \"value\": \"16\"}]} ], \"display_name\": \"cpu + mem baseline\", }", "= { \"baseline_facts\": [ {\"name\": \"memory\", \"value\": \"64GB\"}, {\"name\": \"cpu_sockets\",", "{ \"smart_management\": { \"is_entitled\": true } } } \"\"\" AUTH_HEADER", "\"arch baseline\"} CREATE_FROM_INVENTORY = { \"display_name\": \"created_from_inventory\", \"inventory_uuid\": \"df925152-c45d-11e9-a1f0-c85b761454fa\", }", "\"<KEY>\" \"<KEY> } AUTH_HEADER_SMART_MGMT_FALSE = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\"", "+ mem baseline\", } BASELINE_PARTIAL_ONE = {\"baseline_facts\": [{\"name\": \"hello\", \"value\":", "} \"\"\" AUTH_HEADER_NO_ACCT = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"<KEY> } AUTH_HEADER_SMART_MGMT_FALSE = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\" \"fX0K\"", "\"rhel_machine_id\": None, \"satellite_id\": None, \"subscription_manager_id\": \"RHN Classic and Red Hat", "{ \"name\": \"hello\", \"values\": [ {\"name\": \"nested_one\", \"value\": \"one\"}, {\"name\":", "AUTH_HEADER_SMART_MGMT_FALSE = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "} } } 
\"\"\" AUTH_HEADER = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\"", "\"is_org_admin\": false, \"last_name\": \"Number\", \"locale\": \"en_US\", \"username\": \"nonumber\" } }", "{ \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "{ \"email\": \"<EMAIL>\", \"first_name\": \"Firstname\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\":", "\"fc00:db20:35b:7399::5\"], \"mac_addresses\": [\"52:54:00:cd:ae:00\", \"00:00:00:00:00:00\"], \"rhel_machine_id\": None, \"satellite_id\": None, \"subscription_manager_id\": \"RHN", "{ \"account_number\": \"1234\", \"internal\": { \"org_id\": \"5678\" }, \"type\": \"User\",", "{ \"baseline_facts\": [ {\"name\": \"memory\", \"value\": \"64GB\"}, {\"name\": \"cpu_sockets\", \"value\":", "\"python2-libs-2.7.16-2.fc30.x86_64\", ], \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", }, \"tags\": [], \"updated\": \"2018-01-31T14:00:00.500000Z\", }", "\"baseline_facts\": [ {\"name\": \"nested\", \"values\": [{\"name\": \"cpu_sockets\", \"value\": \"16\"}]} ],", "\"value\": \"16\"}, ], \"display_name\": \"cpu + mem baseline\", } BASELINE_THREE_LOAD", "{\"name\": \"arch\", \"value\": \"x86_64\"}, {\"name\": \"phony.arch.fact\", \"value\": \"some value\"}, ],", "} \"\"\" AUTH_HEADER = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"fqdn\": None, \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", \"insights_id\": \"00000000-28af-11e9-9ab0-c85b761454fa\", \"ip_addresses\": [\"10.0.0.3\", \"fc00:db20:35b:7399::5\"], \"mac_addresses\":", "AUTH_HEADER_NO_ACCT (newlines added for readablity): { \"identity\": { \"internal\": {", "mem baseline\", } BASELINE_THREE_LOAD = { \"baseline_facts\": [ {\"name\": \"nested\",", "\"locale\": \"en_US\", \"username\": \"nonumber\" } } } \"\"\" AUTH_HEADER_NO_ACCT =", "[\"10.0.0.3\", \"fc00:db20:35b:7399::5\"], \"mac_addresses\": [\"52:54:00:cd:ae:00\", \"00:00:00:00:00:00\"], \"rhel_machine_id\": None, \"satellite_id\": None, 
\"subscription_manager_id\":", "} \"entitlements\": { \"smart_management\": { \"is_entitled\": true } } }", "= { \"account\": \"9876543\", \"bios_uuid\": \"e380fd4a-28ae-11e9-974c-c85b761454fb\", \"created\": \"2018-01-31T13:00:00.100010Z\", \"display_name\": None,", "\"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_NO_ENTITLEMENTS = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\"", "\"last_name\": \"Number\", \"locale\": \"en_US\", \"username\": \"nonumber\" } } } \"\"\"", "baseline\", } BASELINE_PARTIAL_ONE = {\"baseline_facts\": [{\"name\": \"hello\", \"value\": \"world\"}]} BASELINE_PARTIAL_TWO", "= { \"baseline_facts\": [ {\"name\": \"arch\", \"value\": \"x86_64\"}, {\"name\": \"phony.arch.fact\",", "baseline\"} CREATE_FROM_INVENTORY = { \"display_name\": \"created_from_inventory\", \"inventory_uuid\": \"df925152-c45d-11e9-a1f0-c85b761454fa\", } SYSTEM_WITH_PROFILE", "decoded AUTH_HEADER (newlines added for readability): { \"identity\": { \"account_number\":", "} \"\"\" decoded AUTH_HEADER_NO_ACCT (newlines added for readablity): { \"identity\":", "\"\"\" AUTH_HEADER = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "{ \"X-RH-IDENTITY\": \"<KEY>f\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "False, \"installed_packages\": [ \"openssl-1.1.1c-2.fc30.x86_64\", \"python2-libs-2.7.16-2.fc30.x86_64\", ], \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", }, \"tags\":", "Subscription Management\", \"system_profile\": { \"salutation\": \"hi\", \"system_profile_exists\": False, \"installed_packages\": [", "\"internal\": { \"org_id\": \"5678\" }, \"type\": \"User\", \"user\": { \"email\":", "} AUTH_HEADER_SMART_MGMT_FALSE = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"display_name\": None, \"fqdn\": None, \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", \"insights_id\": \"00000000-28af-11e9-9ab0-c85b761454fa\", \"ip_addresses\": [\"10.0.0.3\",", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" 
\"<KEY>\" \"<KEY> } AUTH_HEADER_SMART_MGMT_FALSE =", "\"is_internal\": true, \"is_org_admin\": false, \"last_name\": \"Number\", \"locale\": \"en_US\", \"username\": \"nonumber\"", "\"some value\"}, ], \"display_name\": \"arch baseline\", } BASELINE_TWO_LOAD = {", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\" \"fX0K\" } \"\"\" decoded AUTH_HEADER_NO_ACCT", "\"baseline_facts\": [ { \"name\": \"hello\", \"values\": [ {\"name\": \"nested_one\", \"value\":", "Red Hat Subscription Management\", \"system_profile\": { \"salutation\": \"hi\", \"system_profile_exists\": False,", "(newlines added for readability): { \"identity\": { \"account_number\": \"1234\", \"internal\":", "\"baseline_facts\": [ {\"name\": \"arch\", \"value\": \"x86_64\"}, {\"name\": \"phony.arch.fact\", \"value\": \"some", "\"<EMAIL>\", \"first_name\": \"No\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\": false, \"last_name\":", "added for readability): { \"identity\": { \"account_number\": \"1234\", \"internal\": {", "SYSTEM_WITH_PROFILE = { \"account\": \"9876543\", \"bios_uuid\": \"e380fd4a-28ae-11e9-974c-c85b761454fb\", \"created\": \"2018-01-31T13:00:00.100010Z\", \"display_name\":", "\"RHN Classic and Red Hat Subscription Management\", \"system_profile\": { \"salutation\":", "[ {\"name\": \"nested_one\", \"value\": \"one\"}, {\"name\": \"nested_two\", \"value\": \"two\"}, ],", "\"hello\", \"value\": \"world\"}]} BASELINE_PARTIAL_TWO = { \"display_name\": \"ABCDE\", \"baseline_facts\": [", "false, \"last_name\": \"Lastname\", \"locale\": \"en_US\", \"username\": \"test_username\" } } \"entitlements\":", "AUTH_HEADER (newlines added for readability): { \"identity\": { \"account_number\": \"1234\",", "\"values\": [{\"name\": \"cpu_sockets\", \"value\": \"16\"}]} ], \"display_name\": \"cpu + mem", "{\"baseline_facts\": [{\"name\": \"hello\", \"value\": \"world\"}]} BASELINE_PARTIAL_TWO = { \"display_name\": \"ABCDE\",", 
"\"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\" \"<KEY>\" \"<KEY>l\" \"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\" \"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\" } # this can't happen", "\"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"Firstname\", \"is_active\": true,", "\"value\": \"one\"}, {\"name\": \"nested_two\", \"value\": \"two\"}, ], } ], }", "\"created\": \"2018-01-31T13:00:00.100010Z\", \"display_name\": None, \"fqdn\": None, \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", \"insights_id\": \"00000000-28af-11e9-9ab0-c85b761454fa\",", "} # this can't happen in real life, adding test", "test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { \"X-RH-IDENTITY\": \"<KEY>f\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\" \"fX0K\" } \"\"\" decoded", "\"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", \"insights_id\": \"00000000-28af-11e9-9ab0-c85b761454fa\", \"ip_addresses\": [\"10.0.0.3\", \"fc00:db20:35b:7399::5\"], \"mac_addresses\": [\"52:54:00:cd:ae:00\", \"00:00:00:00:00:00\"], \"rhel_machine_id\":", "life, adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { \"X-RH-IDENTITY\": \"<KEY>f\" \"<KEY>\"", "\"is_org_admin\": false, \"last_name\": \"Lastname\", \"locale\": \"en_US\", \"username\": \"test_username\" } }", "= {\"display_name\": \"arch baseline\"} CREATE_FROM_INVENTORY = { \"display_name\": \"created_from_inventory\", \"inventory_uuid\":", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\" \"<KEY>\" \"<KEY>l\" \"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\"", "\"hello\", \"values\": [ {\"name\": \"nested_one\", \"value\": \"one\"}, {\"name\": \"nested_two\", \"value\":", "{\"name\": \"nested_one\", \"value\": \"one\"}, {\"name\": \"nested_two\", \"value\": \"two\"}, ], }", "} } } \"\"\" AUTH_HEADER_NO_ACCT = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\"", "true, \"is_internal\": true, \"is_org_admin\": false, 
\"last_name\": \"Number\", \"locale\": \"en_US\", \"username\":", "\"cpu + mem baseline\", } BASELINE_THREE_LOAD = { \"baseline_facts\": [", "{\"display_name\": \"arch baseline\"} CREATE_FROM_INVENTORY = { \"display_name\": \"created_from_inventory\", \"inventory_uuid\": \"df925152-c45d-11e9-a1f0-c85b761454fa\",", "\"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"No\", \"is_active\": true,", "\"value\": \"two\"}, ], } ], } BASELINE_PARTIAL_CONFLICT = {\"display_name\": \"arch", "\"last_name\": \"Lastname\", \"locale\": \"en_US\", \"username\": \"test_username\" } } \"entitlements\": {", "\"<KEY>\" \"<KEY>l\" \"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\" \"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\" } # this can't happen in", "\"<EMAIL>\", \"first_name\": \"Firstname\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\": false, \"last_name\":", "{ \"display_name\": \"ABCDE\", \"baseline_facts\": [ { \"name\": \"hello\", \"values\": [", "true } } } \"\"\" AUTH_HEADER = { \"X-RH-IDENTITY\": \"<KEY>\"", "\"<KEY> } BASELINE_ONE_LOAD = { \"baseline_facts\": [ {\"name\": \"arch\", \"value\":", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_SMART_MGMT_FALSE = { \"X-RH-IDENTITY\":", "this can't happen in real life, adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS", "\"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\" \"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\" } # this can't happen in real life,", "= { \"display_name\": \"ABCDE\", \"baseline_facts\": [ { \"name\": \"hello\", \"values\":", "BASELINE_TWO_LOAD = { \"baseline_facts\": [ {\"name\": \"memory\", \"value\": \"64GB\"}, {\"name\":", "\"internal\": { \"org_id\": \"9999\" }, \"type\": \"User\", \"user\": { \"email\":", "\"is_active\": true, \"is_internal\": true, \"is_org_admin\": false, \"last_name\": \"Number\", \"locale\": \"en_US\",", "\"username\": \"nonumber\" } } } \"\"\" AUTH_HEADER_NO_ACCT = { \"X-RH-IDENTITY\":", "\"nested_one\", \"value\": \"one\"}, {\"name\": 
\"nested_two\", \"value\": \"two\"}, ], } ],", "\"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\", \"insights_id\": \"00000000-28af-11e9-9ab0-c85b761454fa\", \"ip_addresses\": [\"10.0.0.3\", \"fc00:db20:35b:7399::5\"], \"mac_addresses\": [\"52:54:00:cd:ae:00\", \"00:00:00:00:00:00\"],", "\"x86_64\"}, {\"name\": \"phony.arch.fact\", \"value\": \"some value\"}, ], \"display_name\": \"arch baseline\",", "], \"display_name\": \"cpu + mem baseline\", } BASELINE_THREE_LOAD = {", "= { \"baseline_facts\": [ {\"name\": \"nested\", \"values\": [{\"name\": \"cpu_sockets\", \"value\":", "value\"}, ], \"display_name\": \"arch baseline\", } BASELINE_TWO_LOAD = { \"baseline_facts\":", "BASELINE_PARTIAL_CONFLICT = {\"display_name\": \"arch baseline\"} CREATE_FROM_INVENTORY = { \"display_name\": \"created_from_inventory\",", "\"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\" } # this can't happen in real life, adding", "\"9999\" }, \"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"No\",", "\"subscription_manager_id\": \"RHN Classic and Red Hat Subscription Management\", \"system_profile\": {", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_NO_ENTITLEMENTS =", "\"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\" \"<KEY>\" \"<KEY>l\"", "\"username\": \"test_username\" } } \"entitlements\": { \"smart_management\": { \"is_entitled\": true", "\"00:00:00:00:00:00\"], \"rhel_machine_id\": None, \"satellite_id\": None, \"subscription_manager_id\": \"RHN Classic and Red", "adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { \"X-RH-IDENTITY\": \"<KEY>f\" \"<KEY>\" \"<KEY>\"", "\"en_US\", \"username\": \"nonumber\" } } } \"\"\" AUTH_HEADER_NO_ACCT = {", "\"first_name\": \"No\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\": false, \"last_name\": \"Number\",", "\"memory\", \"value\": \"64GB\"}, {\"name\": \"cpu_sockets\", \"value\": \"16\"}, ], 
\"display_name\": \"cpu", "\"identity\": { \"internal\": { \"org_id\": \"9999\" }, \"type\": \"User\", \"user\":", "[{\"name\": \"hello\", \"value\": \"world\"}]} BASELINE_PARTIAL_TWO = { \"display_name\": \"ABCDE\", \"baseline_facts\":", "\"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_NO_ENTITLEMENTS = { \"X-RH-IDENTITY\":", "\"5678\" }, \"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"Firstname\",", "# this can't happen in real life, adding test anyway", "[\"52:54:00:cd:ae:00\", \"00:00:00:00:00:00\"], \"rhel_machine_id\": None, \"satellite_id\": None, \"subscription_manager_id\": \"RHN Classic and", "added for readablity): { \"identity\": { \"internal\": { \"org_id\": \"9999\"", "\"<KEY>\" \"<KEY> } BASELINE_ONE_LOAD = { \"baseline_facts\": [ {\"name\": \"arch\",", "\"test_username\" } } \"entitlements\": { \"smart_management\": { \"is_entitled\": true }", "{\"name\": \"cpu_sockets\", \"value\": \"16\"}, ], \"display_name\": \"cpu + mem baseline\",", "baseline\", } BASELINE_THREE_LOAD = { \"baseline_facts\": [ {\"name\": \"nested\", \"values\":", "baseline\", } BASELINE_TWO_LOAD = { \"baseline_facts\": [ {\"name\": \"memory\", \"value\":", "\"system_profile\": { \"salutation\": \"hi\", \"system_profile_exists\": False, \"installed_packages\": [ \"openssl-1.1.1c-2.fc30.x86_64\", \"python2-libs-2.7.16-2.fc30.x86_64\",", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\" \"<KEY>\" \"<KEY>l\" \"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\" \"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\" } #", "{\"name\": \"phony.arch.fact\", \"value\": \"some value\"}, ], \"display_name\": \"arch baseline\", }", "\"<KEY>\" \"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\" \"fX0K\" } \"\"\" decoded AUTH_HEADER_NO_ACCT (newlines added for", "[ { \"name\": \"hello\", \"values\": [ {\"name\": \"nested_one\", \"value\": 
\"one\"},", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\" \"<KEY>\" \"<KEY>l\" \"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\" \"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\" }", "\"\"\" decoded AUTH_HEADER (newlines added for readability): { \"identity\": {", "], } BASELINE_PARTIAL_CONFLICT = {\"display_name\": \"arch baseline\"} CREATE_FROM_INVENTORY = {", "\"arch\", \"value\": \"x86_64\"}, {\"name\": \"phony.arch.fact\", \"value\": \"some value\"}, ], \"display_name\":", "{ \"org_id\": \"5678\" }, \"type\": \"User\", \"user\": { \"email\": \"<EMAIL>\",", "\"arch baseline\", } BASELINE_TWO_LOAD = { \"baseline_facts\": [ {\"name\": \"memory\",", "{\"name\": \"nested\", \"values\": [{\"name\": \"cpu_sockets\", \"value\": \"16\"}]} ], \"display_name\": \"cpu", "true, \"is_internal\": true, \"is_org_admin\": false, \"last_name\": \"Lastname\", \"locale\": \"en_US\", \"username\":", "= { \"display_name\": \"created_from_inventory\", \"inventory_uuid\": \"df925152-c45d-11e9-a1f0-c85b761454fa\", } SYSTEM_WITH_PROFILE = {", "BASELINE_PARTIAL_TWO = { \"display_name\": \"ABCDE\", \"baseline_facts\": [ { \"name\": \"hello\",", "} } \"\"\" AUTH_HEADER_NO_ACCT = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\"", "\"is_active\": true, \"is_internal\": true, \"is_org_admin\": false, \"last_name\": \"Lastname\", \"locale\": \"en_US\",", "\"<KEY>l\" \"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu\" \"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==\" } # this can't happen in real", "\"account_number\": \"1234\", \"internal\": { \"org_id\": \"5678\" }, \"type\": \"User\", \"user\":", "\"display_name\": \"cpu + mem baseline\", } BASELINE_THREE_LOAD = { \"baseline_facts\":", "BASELINE_THREE_LOAD = { \"baseline_facts\": [ {\"name\": \"nested\", \"values\": [{\"name\": \"cpu_sockets\",", "\"value\": \"world\"}]} BASELINE_PARTIAL_TWO = { \"display_name\": \"ABCDE\", \"baseline_facts\": [ {", "\"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" 
\"<KEY>\" \"<KEY> } BASELINE_ONE_LOAD", "\"bios_uuid\": \"e380fd4a-28ae-11e9-974c-c85b761454fb\", \"created\": \"2018-01-31T13:00:00.100010Z\", \"display_name\": None, \"fqdn\": None, \"id\": \"bbbbbbbb-28ae-11e9-afd9-c85b761454fa\",", "BASELINE_PARTIAL_ONE = {\"baseline_facts\": [{\"name\": \"hello\", \"value\": \"world\"}]} BASELINE_PARTIAL_TWO = {", "\"user\": { \"email\": \"<EMAIL>\", \"first_name\": \"Firstname\", \"is_active\": true, \"is_internal\": true,", "{ \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi\" \"<KEY>\"", "BASELINE_ONE_LOAD = { \"baseline_facts\": [ {\"name\": \"arch\", \"value\": \"x86_64\"}, {\"name\":", "\"nested\", \"values\": [{\"name\": \"cpu_sockets\", \"value\": \"16\"}]} ], \"display_name\": \"cpu +", "\"cpu_sockets\", \"value\": \"16\"}, ], \"display_name\": \"cpu + mem baseline\", }", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9\" \"fX0K\" } \"\"\"", "} BASELINE_ONE_LOAD = { \"baseline_facts\": [ {\"name\": \"arch\", \"value\": \"x86_64\"},", "{ \"internal\": { \"org_id\": \"9999\" }, \"type\": \"User\", \"user\": {", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } BASELINE_ONE_LOAD = {", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\"", "true, \"is_org_admin\": false, \"last_name\": \"Lastname\", \"locale\": \"en_US\", \"username\": \"test_username\" }", "happen in real life, adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = {", "\"first_name\": \"Firstname\", \"is_active\": true, \"is_internal\": true, \"is_org_admin\": false, \"last_name\": \"Lastname\",", "\"<KEY>\" \"<KEY>\" \"<KEY> } AUTH_HEADER_SMART_MGMT_FALSE = { \"X-RH-IDENTITY\": \"<KEY>\" \"<KEY>\"", "\"nonumber\" } } } \"\"\" AUTH_HEADER_NO_ACCT = { \"X-RH-IDENTITY\": \"<KEY>\"", "{ \"baseline_facts\": [ {\"name\": \"nested\", \"values\": [{\"name\": \"cpu_sockets\", 
\"value\": \"16\"}]}", "\"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY>\" \"<KEY> } BASELINE_ONE_LOAD =", "\"is_internal\": true, \"is_org_admin\": false, \"last_name\": \"Lastname\", \"locale\": \"en_US\", \"username\": \"test_username\"", "\"salutation\": \"hi\", \"system_profile_exists\": False, \"installed_packages\": [ \"openssl-1.1.1c-2.fc30.x86_64\", \"python2-libs-2.7.16-2.fc30.x86_64\", ], \"id\":", "{ \"account\": \"9876543\", \"bios_uuid\": \"e380fd4a-28ae-11e9-974c-c85b761454fb\", \"created\": \"2018-01-31T13:00:00.100010Z\", \"display_name\": None, \"fqdn\":", "\"1234\", \"internal\": { \"org_id\": \"5678\" }, \"type\": \"User\", \"user\": {" ]
[ "i in rule_to_list: day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[] for i in", "m=datetime.timedelta(days=14) newlist=[] for i in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j)", "range(len(newlist)): acc_20gp=0 acc_40gp=0 acc_40hq=0 print('k='+str(k)) for i in range(len(day_list)): print('i='+str(i))", "newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价': if flag[z]==0: flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and", "i in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list) for i", "i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for k in range(len(newlist)):", "data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. 
Existing pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for", "range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[] for i in data_to_list: raw_list.append(i[1])", "rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[] for i in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m]", "range(len(rule_list[i])): print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价': if flag[z]==0: flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP']", "acc_40gp=0 acc_40hq=0 print('k='+str(k)) for i in range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1]", "print(acc_20gp) print(acc_40gp) print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if", "if flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp) print(acc_40gp) print(acc_40hq)", "rule_list[i].iloc[z]['涨价/降价']=='涨价': if flag[z]==0: flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价':", "m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for k in range(len(newlist)): acc_20gp=0 acc_40gp=0", "numpy as np import pandas as pd import matplotlib.pyplot as", "for i in data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 
thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule) day_list=[]", "acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp) print(acc_40gp) print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if", "newlist=[] for i in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list)", "flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价': if flag[z]==0: flag[z]=1", "range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for j in range(len(newlist[k])): if", "print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq", "in data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule) day_list=[] rule_list=[] for", "i in range(len(data)): if data.iloc[i,2] in or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for", "newlist[k].iloc[j]['ordinary']==1: for z in range(len(rule_list[i])): print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价': if", "<reponame>Elfenreigen/MCM-2021-C-SJTU-Test #####Time Flow Simulation###### import numpy as np import pandas", "total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) 
group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule) day_list=[] rule_list=[] for i in rule_to_list:", "k in range(len(newlist)): acc_20gp=0 acc_40gp=0 acc_40hq=0 print('k='+str(k)) for i in", "in or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for i in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD')", "i in data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule) day_list=[] rule_list=[]", "for i in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[] for i", "i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for k in range(len(newlist)): acc_20gp=0 acc_40gp=0 acc_40hq=0", "range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1: for z in", "newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for i in", "pandas as pd import matplotlib.pyplot as plt from datetime import", "import datetime import csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. Existing pricing", "data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. 
Existing pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for i", "or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for i in range(len(data)): if data.iloc[i,2] in or_name:", "acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价': if flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ']", "del(raw_list) for i in newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100", "range(len(data)): if data.iloc[i,2] in or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for i in", "i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list) for i in newlist: i['acc_volume']=i['volume'].cumsum()", "j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list) for i in newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14)", "in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[] for i in data_to_list:", "plt from datetime import timedelta import datetime import csv data=pd.read_excel('CF66-all.xlsx')", "datetime import timedelta import datetime import csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx')", "for z in range(len(rule_list[i])): print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价': if flag[z]==0:", "thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) 
group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule) day_list=[] rule_list=[] for i in rule_to_list: day_list.append(i[0])", "day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[] for i in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14)", "Existing pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for i in range(len(data)): if", "i in range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for j in", "print(acc_40gp) print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ':", "newlist[k].iloc[j,15]+=acc_40hq for i in newlist: print('revenue:'+str(i['AMT'].sum())) print('newrevenue:'+str(i['new_AMT'].sum())) newlist[0].to_csv('voyage1.csv') newlist[1].to_csv('voyage2.csv') newlist[2].to_csv('voyage3.csv')", "in range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1: for z", "data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for i in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[]", "data_to_list=list(raw_data) raw_list=[] for i in data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) group_rule=thisrule.groupby(['开始天数','结束天数'])", "acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp) print(acc_40gp) print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp", "print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and 
rule_list[i].iloc[z]['涨价/降价']=='涨价': if flag[z]==0: flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ']", "i['new_AMT']=i['AMT'] for k in range(len(newlist)): acc_20gp=0 acc_40gp=0 acc_40hq=0 print('k='+str(k)) for", "raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule) day_list=[] rule_list=[] for i in", "i in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[] for i in", "last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for j in range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day", "np import pandas as pd import matplotlib.pyplot as plt from", "as pd import matplotlib.pyplot as plt from datetime import timedelta", "raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list) for i in newlist:", "first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for j in range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day and", "import numpy as np import pandas as pd import matplotlib.pyplot", "data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[] for i in data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2", "Flow Simulation###### import numpy as np import pandas as pd", "rule_to_list: day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[] for i in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT'])", "if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for i in newlist: print('revenue:'+str(i['AMT'].sum())) print('newrevenue:'+str(i['new_AMT'].sum())) 
newlist[0].to_csv('voyage1.csv')", "matplotlib.pyplot as plt from datetime import timedelta import datetime import", "i in newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for", "rule_to_list=list(group_rule) day_list=[] rule_list=[] for i in rule_to_list: day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14)", "or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for i in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data)", "newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for i", "newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for i in newlist: print('revenue:'+str(i['AMT'].sum())) print('newrevenue:'+str(i['new_AMT'].sum()))", "i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for k in range(len(newlist)): acc_20gp=0 acc_40gp=0 acc_40hq=0 print('k='+str(k))", "print('k='+str(k)) for i in range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for", "import timedelta import datetime import csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6.", "acc_20gp=0 acc_40gp=0 acc_40hq=0 print('k='+str(k)) for i in range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0]", "from datetime import timedelta import datetime import csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True)", 
"raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[] for i in data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口'))", "in range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for j in range(len(newlist[k])):", "and newlist[k].iloc[j]['ordinary']==1: for z in range(len(rule_list[i])): print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价':", "if flag[z]==0: flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价': if", "Simulation###### import numpy as np import pandas as pd import", "in range(len(newlist)): acc_20gp=0 acc_40gp=0 acc_40hq=0 print('k='+str(k)) for i in range(len(day_list)):", "or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. 
Existing pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for i in", "print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for j in range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day", "data.iloc[i,2] in or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for i in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2])", "newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for k in", "acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp) print(acc_40gp) print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP':", "and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1: for z in range(len(rule_list[i])): print('z='+str(z)) if", "day_list=[] rule_list=[] for i in rule_to_list: day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[]", "for i in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list) for", "for k in range(len(newlist)): acc_20gp=0 acc_40gp=0 acc_40hq=0 print('k='+str(k)) for i", "acc_40hq=0 print('k='+str(k)) for i in range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i])", "rule_list[i].iloc[z]['涨价/降价']=='降价': if flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp) print(acc_40gp)", "if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价': if flag[z]==0: flag[z]=1 
acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if", "import csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. Existing pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique()", "acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价': if flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP']", "newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for i in newlist: print('revenue:'+str(i['AMT'].sum())) print('newrevenue:'+str(i['new_AMT'].sum())) newlist[0].to_csv('voyage1.csv') newlist[1].to_csv('voyage2.csv')", "newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1: for z in range(len(rule_list[i])): print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and", "if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1: for z in range(len(rule_list[i])):", "raw_list=[] for i in data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule)", "if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for", "rule=pd.read_excel('6. 
Existing pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for i in range(len(data)):", "flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp) print(acc_40gp) print(acc_40hq) if", "as np import pandas as pd import matplotlib.pyplot as plt", "as plt from datetime import timedelta import datetime import csv", "i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for k in range(len(newlist)): acc_20gp=0", "for i in range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for j", "newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价': if flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp)", "group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule) day_list=[] rule_list=[] for i in rule_to_list: day_list.append(i[0]) rule_list.append(i[1])", "if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价': if flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag)", "in rule_to_list: day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[] for i in raw_list:", "data['volume']=data['CNTR_TYPE'] for i in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[] for", "import pandas as pd import matplotlib.pyplot as plt from datetime", "data['ordinary']=0 for i in range(len(data)): if data.iloc[i,2] in or_name: 
data.iloc[i,9]=1", "j in range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1: for", "#####Time Flow Simulation###### import numpy as np import pandas as", "in range(len(data)): if data.iloc[i,2] in or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for i", "for i in range(len(data)): if data.iloc[i,2] in or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE']", "for i in rule_to_list: day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[] for i", "in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list) for i in", "data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口')) group_rule=thisrule.groupby(['开始天数','结束天数']) rule_to_list=list(group_rule) day_list=[] rule_list=[] for i", "import matplotlib.pyplot as plt from datetime import timedelta import datetime", "for j in range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1:", "z in range(len(rule_list[i])): print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价': if flag[z]==0: flag[z]=1", "if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for i in newlist:", "timedelta import datetime import csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. 
Existing", "print(flag) print(acc_20gp) print(acc_40gp) print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp", "strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for i in range(len(data)): if data.iloc[i,2] in", "flag[z]==0: flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价': if flag[z]==0:", "csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. Existing pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0", "acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价': if flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP']", "pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for i in range(len(data)): if data.iloc[i,2]", "rule_list=[] for i in rule_to_list: day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[] for", "flag=[0]*len(rule_list[i]) for j in range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and", "newlist.append(j) del(raw_list) for i in newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days", "newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for i in newlist: print('revenue:'+str(i['AMT'].sum()))", "if 
data.iloc[i,2] in or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for i in range(len(data)):", "for i in newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT']", "m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list) for i in newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2", "newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1: for z in range(len(rule_list[i])): print('z='+str(z))", "datetime import csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. Existing pricing strategy.xlsx')", "in range(len(rule_list[i])): print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价': if flag[z]==0: flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP']", "flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp) print(acc_40gp) print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP':", "in newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for k", "pd import matplotlib.pyplot as plt from datetime import timedelta import" ]
[ "0, 1, 2, 3], 0.1, 0, np.inf), ([-3, -2, -1,", "2, -3, 4, 5], 0.1, 0, 4), ([-3, -2, -1,", "8, 9, 10], 0.1, 0, 4), ([-3, -2, -1, 0,", "import numpy as np from fanok.selection import adaptive_significance_threshold @pytest.mark.parametrize( \"w,", "3), ( [-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45,", "0, 1.93, ), ], ) def test_adaptive_significance_threshold(w, q, offset, expected):", "7, 8, 9, 10], 0.15, 0, 3), ( [-1.52, 1.93,", "([-3, -2, -1, 0, 1, 2, 3], 0.1, 0, np.inf),", "[-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38],", "6, 7, 8, 9, 10], 0.1, 0, 4), ([-3, -2,", "-2, -1, 0, 1, 2, 3], 0.1, 0, np.inf), ([-3,", "-0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38], 0.1, 0, 1.93,", "pytest import numpy as np from fanok.selection import adaptive_significance_threshold @pytest.mark.parametrize(", "7, 8, 9, 10], 0.1, 0, 4), ([-3, -2, -1,", "5], 0.1, 0, 1), ([-1, 2, -3, 4, 5], 0.1,", "2, 3], 0.1, 0, np.inf), ([-3, -2, -1, 0, 1,", "4, 5, 6, 7, 8, 9, 10], 0.15, 0, 3),", "1.21, -0.39, 0.08, -1.45, 0.31, -1.38], 0.1, 0, 1.93, ),", "offset, expected\", [ ([1, 2, 3, 4, 5], 0.1, 0,", "0, 1), ([-1, 2, -3, 4, 5], 0.1, 0, 4),", "expected): w = np.array(w) threshold = adaptive_significance_threshold(w, q, offset=offset) assert", "6, 7, 8, 9, 10], 0.15, 0, 3), ( [-1.52,", "-3, 4, 5], 0.1, 0, 4), ([-3, -2, -1, 0,", "-1, 0, 1, 2, 3, 4, 5, 6, 7, 8,", "1, 2, 3, 4, 5, 6, 7, 8, 9, 10],", "-0.39, 0.08, -1.45, 0.31, -1.38], 0.1, 0, 1.93, ), ],", "-1, 0, 1, 2, 3], 0.1, 0, np.inf), ([-3, -2,", "0, 4), ([-3, -2, -1, 0, 1, 2, 3], 0.1,", "expected\", [ ([1, 2, 3, 4, 5], 0.1, 0, 1),", "0.1, 0, 1.93, ), ], ) def test_adaptive_significance_threshold(w, q, offset,", "0.1, 0, 1), ([-1, 2, -3, 4, 5], 0.1, 0,", "0.1, 0, 4), ([-3, -2, -1, 0, 1, 2, 3],", "adaptive_significance_threshold @pytest.mark.parametrize( \"w, q, offset, expected\", [ ([1, 2, 3,", "0.1, 0, np.inf), ([-3, -2, -1, 0, 1, 2, 3,", "from fanok.selection import adaptive_significance_threshold 
@pytest.mark.parametrize( \"w, q, offset, expected\", [", "5, 6, 7, 8, 9, 10], 0.15, 0, 3), (", "0, np.inf), ([-3, -2, -1, 0, 1, 2, 3, 4,", "offset, expected): w = np.array(w) threshold = adaptive_significance_threshold(w, q, offset=offset)", "4, 5, 6, 7, 8, 9, 10], 0.1, 0, 4),", "q, offset, expected\", [ ([1, 2, 3, 4, 5], 0.1,", "4), ([-3, -2, -1, 0, 1, 2, 3, 4, 5,", "import adaptive_significance_threshold @pytest.mark.parametrize( \"w, q, offset, expected\", [ ([1, 2,", "5, 6, 7, 8, 9, 10], 0.1, 0, 4), ([-3,", "as np from fanok.selection import adaptive_significance_threshold @pytest.mark.parametrize( \"w, q, offset,", "2, 3, 4, 5, 6, 7, 8, 9, 10], 0.15,", "3, 4, 5, 6, 7, 8, 9, 10], 0.15, 0,", "1, 2, 3], 0.1, 0, np.inf), ([-3, -2, -1, 0,", "1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38], 0.1,", "-1.38], 0.1, 0, 1.93, ), ], ) def test_adaptive_significance_threshold(w, q,", "8, 9, 10], 0.15, 0, 3), ( [-1.52, 1.93, -0.76,", "5], 0.1, 0, 4), ([-3, -2, -1, 0, 1, 2,", "0.08, -1.45, 0.31, -1.38], 0.1, 0, 1.93, ), ], )", "test_adaptive_significance_threshold(w, q, offset, expected): w = np.array(w) threshold = adaptive_significance_threshold(w,", "4, 5], 0.1, 0, 1), ([-1, 2, -3, 4, 5],", "fanok.selection import adaptive_significance_threshold @pytest.mark.parametrize( \"w, q, offset, expected\", [ ([1,", "-2, -1, 0, 1, 2, 3, 4, 5, 6, 7,", "10], 0.1, 0, 4), ([-3, -2, -1, 0, 1, 2,", "0, 3), ( [-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08,", "-1.45, 0.31, -1.38], 0.1, 0, 1.93, ), ], ) def", "10], 0.15, 0, 3), ( [-1.52, 1.93, -0.76, -0.35, 1.21,", "2, 3, 4, 5, 6, 7, 8, 9, 10], 0.1,", "), ], ) def test_adaptive_significance_threshold(w, q, offset, expected): w =", "2, 3, 4, 5], 0.1, 0, 1), ([-1, 2, -3,", "3, 4, 5], 0.1, 0, 1), ([-1, 2, -3, 4,", ") def test_adaptive_significance_threshold(w, q, offset, expected): w = np.array(w) threshold", "\"w, q, offset, expected\", [ ([1, 2, 3, 4, 5],", "def test_adaptive_significance_threshold(w, q, offset, expected): w = 
np.array(w) threshold =", "-0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38], 0.1, 0,", "], ) def test_adaptive_significance_threshold(w, q, offset, expected): w = np.array(w)", "0.15, 0, 3), ( [-1.52, 1.93, -0.76, -0.35, 1.21, -0.39,", "np from fanok.selection import adaptive_significance_threshold @pytest.mark.parametrize( \"w, q, offset, expected\",", "([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6,", "4, 5], 0.1, 0, 4), ([-3, -2, -1, 0, 1,", "0, 1, 2, 3, 4, 5, 6, 7, 8, 9,", "np.array(w) threshold = adaptive_significance_threshold(w, q, offset=offset) assert threshold == expected", "np.inf), ([-3, -2, -1, 0, 1, 2, 3, 4, 5,", "w = np.array(w) threshold = adaptive_significance_threshold(w, q, offset=offset) assert threshold", "import pytest import numpy as np from fanok.selection import adaptive_significance_threshold", "9, 10], 0.15, 0, 3), ( [-1.52, 1.93, -0.76, -0.35,", "1), ([-1, 2, -3, 4, 5], 0.1, 0, 4), ([-3,", "0.1, 0, 4), ([-3, -2, -1, 0, 1, 2, 3,", "3], 0.1, 0, np.inf), ([-3, -2, -1, 0, 1, 2,", "4), ([-3, -2, -1, 0, 1, 2, 3], 0.1, 0,", "( [-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31,", "([-1, 2, -3, 4, 5], 0.1, 0, 4), ([-3, -2,", "q, offset, expected): w = np.array(w) threshold = adaptive_significance_threshold(w, q,", "numpy as np from fanok.selection import adaptive_significance_threshold @pytest.mark.parametrize( \"w, q,", "0.31, -1.38], 0.1, 0, 1.93, ), ], ) def test_adaptive_significance_threshold(w,", "3, 4, 5, 6, 7, 8, 9, 10], 0.1, 0,", "0, 4), ([-3, -2, -1, 0, 1, 2, 3, 4,", "= np.array(w) threshold = adaptive_significance_threshold(w, q, offset=offset) assert threshold ==", "9, 10], 0.1, 0, 4), ([-3, -2, -1, 0, 1,", "@pytest.mark.parametrize( \"w, q, offset, expected\", [ ([1, 2, 3, 4,", "[ ([1, 2, 3, 4, 5], 0.1, 0, 1), ([-1,", "([1, 2, 3, 4, 5], 0.1, 0, 1), ([-1, 2,", "1.93, ), ], ) def test_adaptive_significance_threshold(w, q, offset, expected): w" ]
[ "i in far: if options and 'far_explicit' in options: far[i]", "'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A',", "'B', 'C', 'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']:", "450, 'IA-1': 700, 'SD-8': 650, 'SD-14': 800, } for i", "2.0 (the \"License\"); # you may not use this file", "'BB-2', 'SD-1', 'SD-6', 'SD-7']: lot_area[i] = 300 for i in", "in ['C', 'SD-9', 'SD-10F', 'SD-10H']: far[i] = 0.6 for i", "\".join(changes) else: return \"\" def serve(options): d = open(\"unit_template.html\") template", "handler to manage the #incoming request server = HTTPServer(('', PORT_NUMBER),", "d[col[0]] = row[idx] return d def compute_count(options = None): conn", "{} for i in ['A-1', 'A-2', 'B', 'SD-2']: far[i] =", "table(options): far, lot_area = get_caps(options) table = [] for i", "['C-1', 'BA-3', 'IB-2', 'O-1']: far[i] = .75 for i in", "\"\"), lot_area.get(i,\"\"))) return \"\\n\".join(table) def unit_cap(row, options=None): if not options:", "'SD-8']: far[i] = 1.75 for i in ['BC', 'O-2']: far[i]", "requests server.serve_forever() except KeyboardInterrupt: print '^C received, shutting down the", "\"A-2\", \"B\")) or zone == \"CRDD\": return -1 if zone", "'SD-6', 'SD-7']: far[i] = 3.0 for i in ['IA-2', 'IB']:", "== -1: continue m += int(t) return m def describe(options):", "in options: changes.append('eliminate single family zoning in A-1 and A-2", "'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']: lot_area[i] = 300", "s PORT_NUMBER = 8080 class myHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html')", "= int(form['lot_explicit'][0]) if 'lot' in form: options['no_lot'] = True if", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "%s\" % options['lot_explicit']) elif 'lot_factor' in options and options['lot_factor'] !=", "the html message form = parse_qs(urlparse(self.path).query) options = {} for", "= float(form['far_explicit'][0]) if 
'lot_explicit' in form and form['lot_explicit']: options['lot_explicit'] =", "Copyright 2019 <NAME> # # Licensed under the Apache License,", "'SD-9', 'SD-10F', 'SD-10H']: far[i] = 0.6 for i in ['C-1',", "lot_area: if options and 'lot_explicit' in options: lot_area[i] = options['lot_explicit']", "template = Template( d.read() ) unit_count = int(compute_count(options)) data =", "True if 'twofamily' in form: options['no_b'] = True self.wfile.write(serve(options)) return", "\"B\" and not 'no_b' in options: m = min(m, 2)", "far[i] = 0.5 for i in ['C', 'SD-9', 'SD-10F', 'SD-10H']:", "\", \".join(changes) else: return \"\" def serve(options): d = open(\"unit_template.html\")", "in ['BA-1', 'SD-12']: far[i] = 1.0 for i in ['C-1A',", "minimums\") elif 'lot_explicit' in options: changes.append(\"set all lot size/unit minimums", "a web server and define the handler to manage the", "'IA', 'O-2A', 'SD-4A', 'SD-13']: far[i] = 1.5 for i in", "language governing permissions and # limitations under the License. from", "'SD-7']: far[i] = 3.0 for i in ['IA-2', 'IB']: far[i]", "#!/usr/bin/python # Copyright 2019 <NAME> # # Licensed under the", "'A-2': 4500, 'C-1A': 1000, 'BC': 500, 'BC-1': 450, 'IA-1': 700,", "form and form['far_explicit']: options['far_explicit'] = float(form['far_explicit'][0]) if 'lot_explicit' in form", "t == -1: continue m += int(t) return m def", "lot_area[i] = 1500 for i in ['C-2', 'C-2B', 'O-2', 'BA',", "= 3.5 lot_area = { 'A-1': 6000, 'A-2': 4500, 'C-1A':", "\"CRDD\": return -1 if zone in ['A-1', 'A-2'] and not", "= 0 current = 0 for row in c.execute(\"SELECT *", "in ['C-2A']: far[i] = 2.50 for i in ['C-3', 'C-3A',", "int(form['lot_explicit'][0]) if 'lot' in form: options['no_lot'] = True if 'singlefamily'", "use this file except in compliance with the License. 
#", "% options['far_explicit']) elif 'far_factor' in options and options['far_factor'] != 1.0:", "options['far_explicit'] = float(form['far_explicit'][0]) if 'lot_explicit' in form and form['lot_explicit']: options['lot_explicit']", "#print row area = float(row.get('gis_lot_size',0) or 0) if zone in", "0.5 for i in ['C', 'SD-9', 'SD-10F', 'SD-10H']: far[i] =", "if options and 'lot_explicit' in options: lot_area[i] = options['lot_explicit'] elif", "650, 'SD-14': 800, } for i in ['IB-2', 'BA-1']: lot_area[i]", "= 1200 for i in ['B', 'SD-2', 'SD-3']: lot_area[i] =", "max(area/(lot_area[zone]), 1) else: m = 100000 max_building = area *", "current = 0 for row in c.execute(\"SELECT * FROM lots\"):", "describe(options): changes = [] if 'no_lot' in options: changes.append(\"eliminate lot", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "import BaseHTTPRequestHandler,HTTPServer from urlparse import urlparse, parse_qs from jinja2 import", "conn.cursor() c.row_factory = dict_factory m = 0 current = 0", "for i in ['C-1', 'BA-3', 'IB-2', 'O-1']: far[i] = .75", "return m def dict_factory(cursor, row): d = {} for idx,", "License. 
# You may obtain a copy of the License", "= [] if 'no_lot' in options: changes.append(\"eliminate lot size/unit minimums\")", "on port ' , PORT_NUMBER #Wait forever for incoming htto", "if i in form: options[i] = float(form[i][0]) else: options[i] =", "table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" % (i, far.get(i, \"\"), lot_area.get(i,\"\"))) return \"\\n\".join(table) def unit_cap(row,", "#Create a web server and define the handler to manage", "'B', 'SD-2']: far[i] = 0.5 for i in ['C', 'SD-9',", "i in ['C-2A']: far[i] = 2.50 for i in ['C-3',", "under the License is distributed on an \"AS IS\" BASIS,", "zone = row['zone'] if (not zone.startswith(\"C\") and not zone in", "factor of %s' % options['far_factor']) if len(changes): return \", \".join(changes)", "'C-2B', 'BA', 'BA-2', 'SD-8']: far[i] = 1.75 for i in", "License for the specific language governing permissions and # limitations", "== None: options = {} c = conn.cursor() c.row_factory =", "if len(changes): return \", \".join(changes) else: return \"\" def serve(options):", "-1: continue m += int(t) return m def describe(options): changes", "factor of %s' % options['lot_factor']) if 'no_a' in options: changes.append('eliminate", "changes.append('eliminate two-family zoning limits in B zones') if 'far_explicit' in", "elif 'lot_factor' in options and options['lot_factor'] != 1.0: changes.append('decrease lot", "'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']: far[i] = 3.0", "+= int(t) return m def describe(options): changes = [] if", "= 300 for i in lot_area: if options and 'lot_explicit'", "BaseHTTPRequestHandler,HTTPServer from urlparse import urlparse, parse_qs from jinja2 import Template", "' , PORT_NUMBER #Wait forever for incoming htto requests server.serve_forever()", "'far_explicit' in form and form['far_explicit']: options['far_explicit'] = float(form['far_explicit'][0]) if 'lot_explicit'", "unit_cap(row, options=options) if t == -1: continue m += int(t)", "permissions and # 
limitations under the License. from BaseHTTPServer import", "zones') if 'no_b' in options: changes.append('eliminate two-family zoning limits in", "i in ['C-1', 'BA-3', 'IB-2', 'O-1']: far[i] = .75 for", "if 'singlefamily' in form: options['no_a'] = True if 'twofamily' in", "= 600 for i in ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB',", "area = float(row.get('gis_lot_size',0) or 0) if zone in lot_area and", "for incoming htto requests server.serve_forever() except KeyboardInterrupt: print '^C received,", "= far[i] * float(options['far_factor']) if 'no_far' in options: far =", "= max(area/(lot_area[zone]), 1) else: m = 100000 max_building = area", "1) else: m = 100000 max_building = area * far[zone]", "from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from urlparse import urlparse, parse_qs from", "= template.render(**data) return s PORT_NUMBER = 8080 class myHandler(BaseHTTPRequestHandler): def", "in options: changes.append(\"set all FAR maximums to %s\" % options['far_explicit'])", "'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']: lot_area[i] = 600 for i", "= 100000 max_building = area * far[zone] * 1 if", "server and define the handler to manage the #incoming request", "lot_area = get_caps(options) zone = row['zone'] if (not zone.startswith(\"C\") and", "= 1500 for i in ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2',", "in compliance with the License. 
# You may obtain a", "2500 for i in ['C', 'SD-10F', 'SD-10H', 'SD-9']: lot_area[i] =", "enumerate(cursor.description): d[col[0]] = row[idx] return d def compute_count(options = None):", "options['far_explicit']) elif 'far_factor' in options and options['far_factor'] != 1.0: changes.append('increase", "htto requests server.serve_forever() except KeyboardInterrupt: print '^C received, shutting down", "def serve(options): d = open(\"unit_template.html\") template = Template( d.read() )", "software # distributed under the License is distributed on an", "6000, 'A-2': 4500, 'C-1A': 1000, 'BC': 500, 'BC-1': 450, 'IA-1':", "def describe(options): changes = [] if 'no_lot' in options: changes.append(\"eliminate", "options and 'far_explicit' in options: far[i] = options['far_explicit'] elif options", "# limitations under the License. from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from", "int(compute_count(options)) data = {} data['changes'] = describe(options) data['unit_count'] = unit_count", "for i in ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']: far[i] =", "data = {} data['changes'] = describe(options) data['unit_count'] = unit_count data['increase']", "'IA-1': 700, 'SD-8': 650, 'SD-14': 800, } for i in", "far[i] = 1.5 for i in ['C-2', 'C-2B', 'BA', 'BA-2',", "if 'no_lot' in options: changes.append(\"eliminate lot size/unit minimums\") elif 'lot_explicit'", "urlparse import urlparse, parse_qs from jinja2 import Template import sqlite3", "self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # Send the html message form =", "1200 for i in ['B', 'SD-2', 'SD-3']: lot_area[i] = 2500", "600 for i in ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1',", "zone in (\"A-1\", \"A-2\", \"B\")) or zone == \"CRDD\": return", "lot_area def table(options): far, lot_area = get_caps(options) table = []", "'SD-10H', 'SD-9']: lot_area[i] = 1800 for i in ['C-1', 'BA-3']:", "1 #print row area = float(row.get('gis_lot_size',0) or 0) if zone", "return \"\" def 
serve(options): d = open(\"unit_template.html\") template = Template(", "= table(options) data['options'] = options s = template.render(**data) return s", "2.50 for i in ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1',", "far['SD-11'] = 1.7 far['SD-15'] = 3.5 lot_area = { 'A-1':", "unit_count data['increase'] = unit_count-37453 data['table'] = table(options) data['options'] = options", "['BC', 'O-2']: far[i] = 2.0 for i in ['C-2A']: far[i]", "options: lot_area[i] = int(lot_area[i] / float(options['lot_factor'])) if 'no_lot' in options:", "options: return 1 #print row area = float(row.get('gis_lot_size',0) or 0)", "%s' % options['lot_factor']) if 'no_a' in options: changes.append('eliminate single family", "down the web server' server.socket.close() if __name__ == \"__main__\": print", "s = template.render(**data) return s PORT_NUMBER = 8080 class myHandler(BaseHTTPRequestHandler):", "lot_area = { 'A-1': 6000, 'A-2': 4500, 'C-1A': 1000, 'BC':", ") unit_count = int(compute_count(options)) data = {} data['changes'] = describe(options)", "{} return far, lot_area def table(options): far, lot_area = get_caps(options)", "'far_factor' in options: far[i] = far[i] * float(options['far_factor']) if 'no_far'", "'lot_explicit' in options: changes.append(\"set all lot size/unit minimums to %s\"", "i in ['C', 'SD-10F', 'SD-10H', 'SD-9']: lot_area[i] = 1800 for", "def run(): try: #Create a web server and define the", "'BA-3']: lot_area[i] = 1500 for i in ['C-2', 'C-2B', 'O-2',", "options: changes.append(\"set all FAR maximums to %s\" % options['far_explicit']) elif", "data['increase'] = unit_count-37453 data['table'] = table(options) data['options'] = options s", "import urllib def get_caps(options): far = {} for i in", "= 0.6 for i in ['C-1', 'BA-3', 'IB-2', 'O-1']: far[i]", "changes.append('increase FAR maximums by a factor of %s' % options['far_factor'])", "'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']:", "= int(lot_area[i] / 
float(options['lot_factor'])) if 'no_lot' in options: lot_area =", "1) if zone == \"B\" and not 'no_b' in options:", "2.0 for i in ['C-2A']: far[i] = 2.50 for i", "#Wait forever for incoming htto requests server.serve_forever() except KeyboardInterrupt: print", "{ 'A-1': 6000, 'A-2': 4500, 'C-1A': 1000, 'BC': 500, 'BC-1':", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "100000 max_building = area * far[zone] * 1 if max(int(max_building/800),", "['C-2A']: far[i] = 2.50 for i in ['C-3', 'C-3A', 'C-3B',", "far['BB-1'] = 3.25 far['SD-11'] = 1.7 far['SD-15'] = 3.5 lot_area", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "['C-1A', 'SD-5']: far[i] = 1.25 for i in ['IA-1', 'IA',", "in options: lot_area = {} for i in far: if", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "'lot_factor' in options and options['lot_factor'] != 1.0: changes.append('decrease lot size", "under the License. 
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from urlparse import", "lot_area[i] = int(lot_area[i] / float(options['lot_factor'])) if 'no_lot' in options: lot_area", "to in writing, software # distributed under the License is", "= { 'A-1': 6000, 'A-2': 4500, 'C-1A': 1000, 'BC': 500,", "col in enumerate(cursor.description): d[col[0]] = row[idx] return d def compute_count(options", "'twofamily' in form: options['no_b'] = True self.wfile.write(serve(options)) return def run():", "= {} for i in ['A-1', 'A-2', 'B', 'SD-2']: far[i]", "# See the License for the specific language governing permissions", "800, } for i in ['IB-2', 'BA-1']: lot_area[i] = 1200", "or agreed to in writing, software # distributed under the", "None: options = {} c = conn.cursor() c.row_factory = dict_factory", "['A-1', 'A-2', 'B', 'SD-2']: far[i] = 0.5 for i in", "required by applicable law or agreed to in writing, software", "table(options) data['options'] = options s = template.render(**data) return s PORT_NUMBER", "import urlparse, parse_qs from jinja2 import Template import sqlite3 import", "Send the html message form = parse_qs(urlparse(self.path).query) options = {}", "Template( d.read() ) unit_count = int(compute_count(options)) data = {} data['changes']", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "0) if zone in lot_area and area: m = max(area/(lot_area[zone]),", "options['lot_factor'] != 1.0: changes.append('decrease lot size minimums by a factor", "= {} data['changes'] = describe(options) data['unit_count'] = unit_count data['increase'] =", "in form: options[i] = float(form[i][0]) else: options[i] = 1.0 if", "with the License. 
# You may obtain a copy of", "options: far[i] = options['far_explicit'] elif options and 'far_factor' in options:", "from urlparse import urlparse, parse_qs from jinja2 import Template import", "for i in ['IB-2', 'BA-1']: lot_area[i] = 1200 for i", "i in ['B', 'SD-2', 'SD-3']: lot_area[i] = 2500 for i", "options=None): if not options: options = {} far, lot_area =", "1.75 for i in ['BC', 'O-2']: far[i] = 2.0 for", "return \"\\n\".join(table) def unit_cap(row, options=None): if not options: options =", "'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']: table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" % (i,", "float(form[i][0]) else: options[i] = 1.0 if 'far_explicit' in form and", "data['unit_count'] = unit_count data['increase'] = unit_count-37453 data['table'] = table(options) data['options']", "compliance with the License. # You may obtain a copy", "'far_explicit' in options: far[i] = options['far_explicit'] elif options and 'far_factor'", "agreed to in writing, software # distributed under the License", "'no_b' in options: changes.append('eliminate two-family zoning limits in B zones')", "B zones') if 'far_explicit' in options: changes.append(\"set all FAR maximums", "far[i] = far[i] * float(options['far_factor']) if 'no_far' in options: far", "compute_count(options = None): conn = sqlite3.connect(\"prop.db\") if options == None:", "in form and form['lot_explicit']: options['lot_explicit'] = int(form['lot_explicit'][0]) if 'lot' in", "distributed under the License is distributed on an \"AS IS\"", "far[i] = 0.6 for i in ['C-1', 'BA-3', 'IB-2', 'O-1']:", "= options['lot_explicit'] elif options and 'lot_factor' in options: lot_area[i] =", "} for i in ['IB-2', 'BA-1']: lot_area[i] = 1200 for", "{} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return", "['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1',", "options s = template.render(**data) return s PORT_NUMBER = 8080 class", "* far[zone] * 1 if 
max(int(max_building/800), 1) < m: m", "'SD-10F', 'SD-10H', 'SD-9']: lot_area[i] = 1800 for i in ['C-1',", "else: return \"\" def serve(options): d = open(\"unit_template.html\") template =", "far[i] = .75 for i in ['BA-1', 'SD-12']: far[i] =", "'no_a' in options: changes.append('eliminate single family zoning in A-1 and", "len(changes): return \", \".join(changes) else: return \"\" def serve(options): d", "in ['IB-2', 'BA-1']: lot_area[i] = 1200 for i in ['B',", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "elif options and 'lot_factor' in options: lot_area[i] = int(lot_area[i] /", "= sqlite3.connect(\"prop.db\") if options == None: options = {} c", "form['lot_explicit']: options['lot_explicit'] = int(form['lot_explicit'][0]) if 'lot' in form: options['no_lot'] =", "zoning limits in B zones') if 'far_explicit' in options: changes.append(\"set", "m = 0 current = 0 for row in c.execute(\"SELECT", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "'BA-2', 'SD-8']: far[i] = 1.75 for i in ['BC', 'O-2']:", "for i in ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1',", "not use this file except in compliance with the License.", "License. 
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from urlparse import urlparse, parse_qs", "lot_area[i] = 1200 for i in ['B', 'SD-2', 'SD-3']: lot_area[i]", "'lot_explicit' in options: lot_area[i] = options['lot_explicit'] elif options and 'lot_factor'", "for i in ['C-2A']: far[i] = 2.50 for i in", "a factor of %s' % options['far_factor']) if len(changes): return \",", "writing, software # distributed under the License is distributed on", "'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']: far[i] =", "\"\\n\".join(table) def unit_cap(row, options=None): if not options: options = {}", "4500, 'C-1A': 1000, 'BC': 500, 'BC-1': 450, 'IA-1': 700, 'SD-8':", "* float(options['far_factor']) if 'no_far' in options: far = {} return", "you may not use this file except in compliance with", "i in ['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2', 'C-2A',", "'C-3', 'C-3A', 'C-3B']: table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" % (i, far.get(i, \"\"), lot_area.get(i,\"\"))) return", "= None): conn = sqlite3.connect(\"prop.db\") if options == None: options", "= max(int(max_building/800), 1) if zone == \"B\" and not 'no_b'", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "and not zone in (\"A-1\", \"A-2\", \"B\")) or zone ==", "in form: options['no_a'] = True if 'twofamily' in form: options['no_b']", "1.25 for i in ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']: far[i]", "not zone in (\"A-1\", \"A-2\", \"B\")) or zone == \"CRDD\":", "= 2500 for i in ['C', 'SD-10F', 'SD-10H', 'SD-9']: lot_area[i]", "['BA-1', 'SD-12']: far[i] = 1.0 for i in ['C-1A', 'SD-5']:", "not options: options = {} far, lot_area = get_caps(options) zone", "3.0 for i in ['IA-2', 'IB']: far[i] = 4.0 far['BB-1']", "300 for i in lot_area: if options and 'lot_explicit' in", "None): conn = sqlite3.connect(\"prop.db\") if options == None: options =", "1000, 'BC': 500, 'BC-1': 450, 'IA-1': 700, 'SD-8': 650, 'SD-14':", "except KeyboardInterrupt: print '^C received, 
shutting down the web server'", "PORT_NUMBER), myHandler) print 'Started httpserver on port ' , PORT_NUMBER", "{} for i in far: if options and 'far_explicit' in", "in far: if options and 'far_explicit' in options: far[i] =", "return s PORT_NUMBER = 8080 class myHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200)", "print '^C received, shutting down the web server' server.socket.close() if", "size minimums by a factor of %s' % options['lot_factor']) if", "CONDITIONS OF ANY KIND, either express or implied. # See", "lot_area[i] = 600 for i in ['C-2A', 'C-3', 'C-3A', 'C-3B',", "in options: far[i] = far[i] * float(options['far_factor']) if 'no_far' in", "= {} far, lot_area = get_caps(options) zone = row['zone'] if", "0 for row in c.execute(\"SELECT * FROM lots\"): t =", "import Template import sqlite3 import urllib def get_caps(options): far =", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "'SD-11', 'SD-13']: lot_area[i] = 600 for i in ['C-2A', 'C-3',", "float(options['lot_factor'])) if 'no_lot' in options: lot_area = {} for i", "= {} for i in far: if options and 'far_explicit'", "FAR maximums to %s\" % options['far_explicit']) elif 'far_factor' in options", "for i in ['C-1', 'BA-3']: lot_area[i] = 1500 for i", "far, lot_area = get_caps(options) table = [] for i in", "'no_far' in options: far = {} return far, lot_area def", "size/unit minimums to %s\" % options['lot_explicit']) elif 'lot_factor' in options", "m = 100000 max_building = area * far[zone] * 1", "A-1 and A-2 zones') if 'no_b' in options: changes.append('eliminate two-family", "% options['lot_explicit']) elif 'lot_factor' in options and options['lot_factor'] != 1.0:", "def unit_cap(row, options=None): if not options: options = {} far,", "'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']: lot_area[i] =", "BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from urlparse import urlparse, parse_qs from jinja2", "\"B\")) or zone == \"CRDD\": return -1 if 
zone in", "run(): try: #Create a web server and define the handler", "= HTTPServer(('', PORT_NUMBER), myHandler) print 'Started httpserver on port '", "c = conn.cursor() c.row_factory = dict_factory m = 0 current", "in ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6',", "options: lot_area = {} for i in far: if options", "'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']: far[i] = 3.0 for i", "0.6 for i in ['C-1', 'BA-3', 'IB-2', 'O-1']: far[i] =", "myHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # Send the html", "'SD-1', 'SD-6', 'SD-7']: lot_area[i] = 300 for i in lot_area:", "in enumerate(cursor.description): d[col[0]] = row[idx] return d def compute_count(options =", "a factor of %s' % options['lot_factor']) if 'no_a' in options:", "= parse_qs(urlparse(self.path).query) options = {} for i in ['far_factor', 'lot_factor']:", "3.5 lot_area = { 'A-1': 6000, 'A-2': 4500, 'C-1A': 1000,", "'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6',", "far[i] = 3.0 for i in ['IA-2', 'IB']: far[i] =", "lot_area = get_caps(options) table = [] for i in ['A-1',", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "row area = float(row.get('gis_lot_size',0) or 0) if zone in lot_area", "lot size minimums by a factor of %s' % options['lot_factor'])", "i in ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']: far[i] = 1.5", "options[i] = 1.0 if 'far_explicit' in form and form['far_explicit']: options['far_explicit']", "lot size/unit minimums\") elif 'lot_explicit' in options: changes.append(\"set all lot", "1800 for i in ['C-1', 'BA-3']: lot_area[i] = 1500 for", "for i in ['BC', 'O-2']: far[i] = 2.0 for i", "sqlite3.connect(\"prop.db\") if options == None: options = {} c =", "the License is distributed on an \"AS IS\" BASIS, #", "for i in ['BA-1', 'SD-12']: far[i] = 1.0 for i", "changes.append(\"eliminate lot size/unit minimums\") elif 'lot_explicit' in options: changes.append(\"set all", "!= 1.0: changes.append('decrease lot size minimums by a factor of", "'IB-2', 'O-1']: far[i] = .75 for i in ['BA-1', 'SD-12']:", "if zone in lot_area and area: m = max(area/(lot_area[zone]), 1)", "options and 'far_factor' in options: far[i] = far[i] * float(options['far_factor'])", "httpserver on port ' , PORT_NUMBER #Wait forever for incoming", "'BC': 500, 'BC-1': 450, 'IA-1': 700, 'SD-8': 650, 'SD-14': 800,", "and 'lot_explicit' in options: lot_area[i] = options['lot_explicit'] elif options and", "1.5 for i in ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']: far[i]", "if 'no_a' in options: changes.append('eliminate single family zoning in A-1", "= options s = template.render(**data) return s PORT_NUMBER = 8080", "if 'lot' in form: options['no_lot'] = True if 'singlefamily' in", "limitations under the License. 
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from urlparse", "and area: m = max(area/(lot_area[zone]), 1) else: m = 100000", "'O-2A', 'SD-4A', 'SD-13']: far[i] = 1.5 for i in ['C-2',", "for i in ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2',", "zone == \"CRDD\": return -1 if zone in ['A-1', 'A-2']", "= options['far_explicit'] elif options and 'far_factor' in options: far[i] =", "urlparse, parse_qs from jinja2 import Template import sqlite3 import urllib", "'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']: table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" %", "area * far[zone] * 1 if max(int(max_building/800), 1) < m:", "PORT_NUMBER #Wait forever for incoming htto requests server.serve_forever() except KeyboardInterrupt:", "far[zone] * 1 if max(int(max_building/800), 1) < m: m =", "if (not zone.startswith(\"C\") and not zone in (\"A-1\", \"A-2\", \"B\"))", "(\"A-1\", \"A-2\", \"B\")) or zone == \"CRDD\": return -1 if", "= unit_count data['increase'] = unit_count-37453 data['table'] = table(options) data['options'] =", "d = open(\"unit_template.html\") template = Template( d.read() ) unit_count =", "'SD-4A', 'SD-13']: far[i] = 1.5 for i in ['C-2', 'C-2B',", "law or agreed to in writing, software # distributed under", "if options == None: options = {} c = conn.cursor()", "4.0 far['BB-1'] = 3.25 far['SD-11'] = 1.7 far['SD-15'] = 3.5", "options['lot_explicit']) elif 'lot_factor' in options and options['lot_factor'] != 1.0: changes.append('decrease", "= 2.0 for i in ['C-2A']: far[i] = 2.50 for", "= {} c = conn.cursor() c.row_factory = dict_factory m =", "in A-1 and A-2 zones') if 'no_b' in options: changes.append('eliminate", "governing permissions and # limitations under the License. 
from BaseHTTPServer", "if zone in ['A-1', 'A-2'] and not 'no_a' in options:", "table = [] for i in ['A-1', 'A-2', 'B', 'C',", "= 0.5 for i in ['C', 'SD-9', 'SD-10F', 'SD-10H']: far[i]", "all lot size/unit minimums to %s\" % options['lot_explicit']) elif 'lot_factor'", "to %s\" % options['lot_explicit']) elif 'lot_factor' in options and options['lot_factor']", "'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']: lot_area[i] = 600", "return def run(): try: #Create a web server and define", "incoming htto requests server.serve_forever() except KeyboardInterrupt: print '^C received, shutting", "far[i] = 2.50 for i in ['C-3', 'C-3A', 'C-3B', 'BB',", "size/unit minimums\") elif 'lot_explicit' in options: changes.append(\"set all lot size/unit", "of %s' % options['lot_factor']) if 'no_a' in options: changes.append('eliminate single", "far[i] = 1.75 for i in ['BC', 'O-2']: far[i] =", "far[i] = 2.0 for i in ['C-2A']: far[i] = 2.50", "options and 'lot_factor' in options: lot_area[i] = int(lot_area[i] / float(options['lot_factor']))", "lots\"): t = unit_cap(row, options=options) if t == -1: continue", "t = unit_cap(row, options=options) if t == -1: continue m", "and # limitations under the License. 
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer", "'lot_factor']: if i in form: options[i] = float(form[i][0]) else: options[i]", "'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']: far[i]", "< m: m = max(int(max_building/800), 1) if zone == \"B\"", "row in c.execute(\"SELECT * FROM lots\"): t = unit_cap(row, options=options)", "parse_qs from jinja2 import Template import sqlite3 import urllib def", "700, 'SD-8': 650, 'SD-14': 800, } for i in ['IB-2',", "options[i] = float(form[i][0]) else: options[i] = 1.0 if 'far_explicit' in", "in form and form['far_explicit']: options['far_explicit'] = float(form['far_explicit'][0]) if 'lot_explicit' in", "'SD-2', 'SD-3']: lot_area[i] = 2500 for i in ['C', 'SD-10F',", ".75 for i in ['BA-1', 'SD-12']: far[i] = 1.0 for", "['C', 'SD-9', 'SD-10F', 'SD-10H']: far[i] = 0.6 for i in", "define the handler to manage the #incoming request server =", "return \", \".join(changes) else: return \"\" def serve(options): d =", "m += int(t) return m def describe(options): changes = []", "may obtain a copy of the License at # #", "in options: return 1 #print row area = float(row.get('gis_lot_size',0) or", "else: options[i] = 1.0 if 'far_explicit' in form and form['far_explicit']:", "= 1.0 if 'far_explicit' in form and form['far_explicit']: options['far_explicit'] =", "'C-3B']: table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" % (i, far.get(i, \"\"), lot_area.get(i,\"\"))) return \"\\n\".join(table) def", "options['no_a'] = True if 'twofamily' in form: options['no_b'] = True", "far.get(i, \"\"), lot_area.get(i,\"\"))) return \"\\n\".join(table) def unit_cap(row, options=None): if not", "for i in ['C-1A', 'SD-5']: far[i] = 1.25 for i", "get_caps(options): far = {} for i in ['A-1', 'A-2', 'B',", "in options: changes.append(\"set all lot size/unit minimums to %s\" %", "['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']: far[i] = 1.75 for i", "forever for incoming htto requests server.serve_forever() except 
KeyboardInterrupt: print '^C", "changes.append(\"set all FAR maximums to %s\" % options['far_explicit']) elif 'far_factor'", "'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']: table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" % (i, far.get(i,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "(not zone.startswith(\"C\") and not zone in (\"A-1\", \"A-2\", \"B\")) or", "# Send the html message form = parse_qs(urlparse(self.path).query) options =", "lot_area[i] = 2500 for i in ['C', 'SD-10F', 'SD-10H', 'SD-9']:", "for i in ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']: far[i] =", "{} data['changes'] = describe(options) data['unit_count'] = unit_count data['increase'] = unit_count-37453", "may not use this file except in compliance with the", "= 0 for row in c.execute(\"SELECT * FROM lots\"): t", "elif options and 'far_factor' in options: far[i] = far[i] *", "'SD-4A', 'SD-5', 'SD-11', 'SD-13']: lot_area[i] = 600 for i in", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "float(row.get('gis_lot_size',0) or 0) if zone in lot_area and area: m", "options and 'lot_explicit' in options: lot_area[i] = options['lot_explicit'] elif options", "this file except in compliance with the License. 
# You", "if 'no_lot' in options: lot_area = {} for i in", "not 'no_a' in options: return 1 #print row area =", "= True if 'singlefamily' in form: options['no_a'] = True if", "for i in ['C', 'SD-10F', 'SD-10H', 'SD-9']: lot_area[i] = 1800", "return d def compute_count(options = None): conn = sqlite3.connect(\"prop.db\") if", "all FAR maximums to %s\" % options['far_explicit']) elif 'far_factor' in", "<NAME> # # Licensed under the Apache License, Version 2.0", "or zone == \"CRDD\": return -1 if zone in ['A-1',", "server.serve_forever() except KeyboardInterrupt: print '^C received, shutting down the web", "if 'far_explicit' in options: changes.append(\"set all FAR maximums to %s\"", "/ float(options['lot_factor'])) if 'no_lot' in options: lot_area = {} for", "if options and 'far_explicit' in options: far[i] = options['far_explicit'] elif", "options == None: options = {} c = conn.cursor() c.row_factory", "options: changes.append('eliminate two-family zoning limits in B zones') if 'far_explicit'", "options['no_b'] = True self.wfile.write(serve(options)) return def run(): try: #Create a", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']: far[i] = 1.5 for i", "'C-3A', 'C-3B']: table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" % (i, far.get(i, \"\"), lot_area.get(i,\"\"))) return \"\\n\".join(table)", "m = min(m, 2) return m def dict_factory(cursor, row): d", "in ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']: far[i] = 1.75 for", "'SD-12']: far[i] = 1.0 for i in ['C-1A', 'SD-5']: far[i]", "# # Licensed under the Apache License, Version 2.0 (the", "data['changes'] = describe(options) data['unit_count'] = unit_count data['increase'] = unit_count-37453 data['table']", "do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # Send the html message form", "# Copyright 2019 <NAME> # # Licensed under the Apache", "try: #Create a web server and define 
the handler to", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "self.end_headers() # Send the html message form = parse_qs(urlparse(self.path).query) options", "row['zone'] if (not zone.startswith(\"C\") and not zone in (\"A-1\", \"A-2\",", "changes.append('eliminate single family zoning in A-1 and A-2 zones') if", "'^C received, shutting down the web server' server.socket.close() if __name__", "in options: lot_area[i] = options['lot_explicit'] elif options and 'lot_factor' in", "'C', 'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']: table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\"", "changes = [] if 'no_lot' in options: changes.append(\"eliminate lot size/unit", "and not 'no_b' in options: m = min(m, 2) return", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "row[idx] return d def compute_count(options = None): conn = sqlite3.connect(\"prop.db\")", "form and form['lot_explicit']: options['lot_explicit'] = int(form['lot_explicit'][0]) if 'lot' in form:", "['C', 'SD-10F', 'SD-10H', 'SD-9']: lot_area[i] = 1800 for i in", "['C-1', 'BA-3']: lot_area[i] = 1500 for i in ['C-2', 'C-2B',", "2019 <NAME> # # Licensed under the Apache License, Version", "for i in ['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2',", "in c.execute(\"SELECT * FROM lots\"): t = unit_cap(row, options=options) if", "'lot_explicit' in form and form['lot_explicit']: options['lot_explicit'] = int(form['lot_explicit'][0]) if 'lot'", "['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']:", "in ['IA-2', 'IB']: far[i] = 4.0 far['BB-1'] = 3.25 far['SD-11']", "= 2.50 for i in ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2',", "{} c = conn.cursor() c.row_factory = dict_factory m = 0", "'SD-8': 650, 'SD-14': 800, } for i in ['IB-2', 'BA-1']:", "'no_a' in options: return 1 #print row area = float(row.get('gis_lot_size',0)", "far['SD-15'] = 3.5 lot_area = { 'A-1': 6000, 'A-2': 4500,", 
"unit_count = int(compute_count(options)) data = {} data['changes'] = describe(options) data['unit_count']", "'no_b' in options: m = min(m, 2) return m def", "['IB-2', 'BA-1']: lot_area[i] = 1200 for i in ['B', 'SD-2',", "= {} return far, lot_area def table(options): far, lot_area =", "def table(options): far, lot_area = get_caps(options) table = [] for", "= unit_cap(row, options=options) if t == -1: continue m +=", "% options['lot_factor']) if 'no_a' in options: changes.append('eliminate single family zoning", "'SD-3']: lot_area[i] = 2500 for i in ['C', 'SD-10F', 'SD-10H',", "int(t) return m def describe(options): changes = [] if 'no_lot'", "1.0: changes.append('decrease lot size minimums by a factor of %s'", "= 1.5 for i in ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']:", "= [] for i in ['A-1', 'A-2', 'B', 'C', 'C-1',", "c.execute(\"SELECT * FROM lots\"): t = unit_cap(row, options=options) if t", "in ['C', 'SD-10F', 'SD-10H', 'SD-9']: lot_area[i] = 1800 for i", "options['lot_explicit'] elif options and 'lot_factor' in options: lot_area[i] = int(lot_area[i]", "if max(int(max_building/800), 1) < m: m = max(int(max_building/800), 1) if", "sqlite3 import urllib def get_caps(options): far = {} for i", "'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']: table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" % (i, far.get(i, \"\"),", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']: lot_area[i] = 600 for", "Template import sqlite3 import urllib def get_caps(options): far = {}", "'Started httpserver on port ' , PORT_NUMBER #Wait forever for", "received, shutting down the web server' server.socket.close() if __name__ ==", "for i in ['IA-2', 'IB']: far[i] = 4.0 far['BB-1'] =", "in options: m = min(m, 2) return m def dict_factory(cursor,", "lot_area = {} for i in far: if options and", "or implied. 
# See the License for the specific language", "#incoming request server = HTTPServer(('', PORT_NUMBER), myHandler) print 'Started httpserver", "max(int(max_building/800), 1) < m: m = max(int(max_building/800), 1) if zone", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "far, lot_area def table(options): far, lot_area = get_caps(options) table =", "int(lot_area[i] / float(options['lot_factor'])) if 'no_lot' in options: lot_area = {}", "elif 'far_factor' in options and options['far_factor'] != 1.0: changes.append('increase FAR", "= {} for i in ['far_factor', 'lot_factor']: if i in", "= .75 for i in ['BA-1', 'SD-12']: far[i] = 1.0", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "i in lot_area: if options and 'lot_explicit' in options: lot_area[i]", "{} far, lot_area = get_caps(options) zone = row['zone'] if (not", "= 3.0 for i in ['IA-2', 'IB']: far[i] = 4.0", "'IB']: far[i] = 4.0 far['BB-1'] = 3.25 far['SD-11'] = 1.7", "* 1 if max(int(max_building/800), 1) < m: m = max(int(max_building/800),", "zones') if 'far_explicit' in options: changes.append(\"set all FAR maximums to", "open(\"unit_template.html\") template = Template( d.read() ) unit_count = int(compute_count(options)) data", "FAR maximums by a factor of %s' % options['far_factor']) if", "minimums to %s\" % options['lot_explicit']) elif 'lot_factor' in options and", "options: options = {} far, lot_area = get_caps(options) zone =", "'singlefamily' in form: options['no_a'] = True if 'twofamily' in form:", "%s' % options['far_factor']) if len(changes): return \", \".join(changes) else: return", "options = {} for i in ['far_factor', 'lot_factor']: if i", "(the \"License\"); # you may not use this file except", "i in ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']: far[i] = 1.75", "= float(row.get('gis_lot_size',0) or 0) if zone in lot_area and area:", "to manage the #incoming request server = 
HTTPServer(('', PORT_NUMBER), myHandler)", "# you may not use this file except in compliance", "for i in ['C', 'SD-9', 'SD-10F', 'SD-10H']: far[i] = 0.6", "options['no_lot'] = True if 'singlefamily' in form: options['no_a'] = True", "'A-2', 'B', 'SD-2']: far[i] = 0.5 for i in ['C',", "c.row_factory = dict_factory m = 0 current = 0 for", "def compute_count(options = None): conn = sqlite3.connect(\"prop.db\") if options ==", "print 'Started httpserver on port ' , PORT_NUMBER #Wait forever", "conn = sqlite3.connect(\"prop.db\") if options == None: options = {}", "'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']: lot_area[i] = 300 for i", "if not options: options = {} far, lot_area = get_caps(options)", "* FROM lots\"): t = unit_cap(row, options=options) if t ==", "template.render(**data) return s PORT_NUMBER = 8080 class myHandler(BaseHTTPRequestHandler): def do_GET(self):", "options=options) if t == -1: continue m += int(t) return", "'SD-14': 800, } for i in ['IB-2', 'BA-1']: lot_area[i] =", "# # Unless required by applicable law or agreed to", "i in ['BA-1', 'SD-12']: far[i] = 1.0 for i in", "in ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11',", "['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3',", "by a factor of %s' % options['far_factor']) if len(changes): return", "'SD-10F', 'SD-10H']: far[i] = 0.6 for i in ['C-1', 'BA-3',", "1500 for i in ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4',", "= describe(options) data['unit_count'] = unit_count data['increase'] = unit_count-37453 data['table'] =", "zone in ['A-1', 'A-2'] and not 'no_a' in options: return", "and 'lot_factor' in options: lot_area[i] = int(lot_area[i] / float(options['lot_factor'])) if", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "i in form: options[i] = float(form[i][0]) else: options[i] = 1.0", "if 'far_explicit' in form and form['far_explicit']: options['far_explicit'] = float(form['far_explicit'][0]) if", "Version 2.0 (the 
\"License\"); # you may not use this", "'O-1']: far[i] = .75 for i in ['BA-1', 'SD-12']: far[i]", "'far_explicit' in options: changes.append(\"set all FAR maximums to %s\" %", "i in ['BC', 'O-2']: far[i] = 2.0 for i in", "in options: far[i] = options['far_explicit'] elif options and 'far_factor' in", "500, 'BC-1': 450, 'IA-1': 700, 'SD-8': 650, 'SD-14': 800, }", "idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d def", "in ['C-1', 'BA-3']: lot_area[i] = 1500 for i in ['C-2',", "i in ['far_factor', 'lot_factor']: if i in form: options[i] =", "'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']: lot_area[i]", "= 8080 class myHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() #", "= 1.7 far['SD-15'] = 3.5 lot_area = { 'A-1': 6000,", "'lot_factor' in options: lot_area[i] = int(lot_area[i] / float(options['lot_factor'])) if 'no_lot'", "d = {} for idx, col in enumerate(cursor.description): d[col[0]] =", ", PORT_NUMBER #Wait forever for incoming htto requests server.serve_forever() except", "== \"B\" and not 'no_b' in options: m = min(m,", "= {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx]", "max_building = area * far[zone] * 1 if max(int(max_building/800), 1)", "implied. 
# See the License for the specific language governing", "return 1 #print row area = float(row.get('gis_lot_size',0) or 0) if", "in options: changes.append(\"eliminate lot size/unit minimums\") elif 'lot_explicit' in options:", "data['options'] = options s = template.render(**data) return s PORT_NUMBER =", "under the Apache License, Version 2.0 (the \"License\"); # you", "= True self.wfile.write(serve(options)) return def run(): try: #Create a web", "options: m = min(m, 2) return m def dict_factory(cursor, row):", "= int(compute_count(options)) data = {} data['changes'] = describe(options) data['unit_count'] =", "describe(options) data['unit_count'] = unit_count data['increase'] = unit_count-37453 data['table'] = table(options)", "and 'far_explicit' in options: far[i] = options['far_explicit'] elif options and", "'A-2'] and not 'no_a' in options: return 1 #print row", "for i in far: if options and 'far_explicit' in options:", "= open(\"unit_template.html\") template = Template( d.read() ) unit_count = int(compute_count(options))", "by applicable law or agreed to in writing, software #", "options and options['lot_factor'] != 1.0: changes.append('decrease lot size minimums by", "self.wfile.write(serve(options)) return def run(): try: #Create a web server and", "'no_lot' in options: changes.append(\"eliminate lot size/unit minimums\") elif 'lot_explicit' in", "= 4.0 far['BB-1'] = 3.25 far['SD-11'] = 1.7 far['SD-15'] =", "['B', 'SD-2', 'SD-3']: lot_area[i] = 2500 for i in ['C',", "1 if max(int(max_building/800), 1) < m: m = max(int(max_building/800), 1)", "zone == \"B\" and not 'no_b' in options: m =", "m def dict_factory(cursor, row): d = {} for idx, col", "get_caps(options) table = [] for i in ['A-1', 'A-2', 'B',", "i in ['IA-2', 'IB']: far[i] = 4.0 far['BB-1'] = 3.25", "by a factor of %s' % options['lot_factor']) if 'no_a' in", "far[i] = 1.0 for i in ['C-1A', 'SD-5']: far[i] =", "'far_factor' in options and options['far_factor'] != 1.0: changes.append('increase FAR 
maximums", "1.0 if 'far_explicit' in form and form['far_explicit']: options['far_explicit'] = float(form['far_explicit'][0])", "form['far_explicit']: options['far_explicit'] = float(form['far_explicit'][0]) if 'lot_explicit' in form and form['lot_explicit']:", "options: changes.append(\"eliminate lot size/unit minimums\") elif 'lot_explicit' in options: changes.append(\"set", "else: m = 100000 max_building = area * far[zone] *", "'BC-1': 450, 'IA-1': 700, 'SD-8': 650, 'SD-14': 800, } for", "HTTPServer(('', PORT_NUMBER), myHandler) print 'Started httpserver on port ' ,", "float(options['far_factor']) if 'no_far' in options: far = {} return far,", "min(m, 2) return m def dict_factory(cursor, row): d = {}", "['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']:", "the License. from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from urlparse import urlparse,", "in ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A',", "= get_caps(options) zone = row['zone'] if (not zone.startswith(\"C\") and not", "or 0) if zone in lot_area and area: m =", "self.send_header('Content-type','text/html') self.end_headers() # Send the html message form = parse_qs(urlparse(self.path).query)", "'SD-5', 'SD-11', 'SD-13']: lot_area[i] = 600 for i in ['C-2A',", "m = max(area/(lot_area[zone]), 1) else: m = 100000 max_building =", "KeyboardInterrupt: print '^C received, shutting down the web server' server.socket.close()", "i in ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1',", "in ['B', 'SD-2', 'SD-3']: lot_area[i] = 2500 for i in", "= unit_count-37453 data['table'] = table(options) data['options'] = options s =", "in (\"A-1\", \"A-2\", \"B\")) or zone == \"CRDD\": return -1", "form: options['no_b'] = True self.wfile.write(serve(options)) return def run(): try: #Create", "web server and define the handler to manage the #incoming", "in ['C-1', 'BA-3', 'IB-2', 'O-1']: far[i] = .75 for i", "% options['far_factor']) if len(changes): 
return \", \".join(changes) else: return \"\"", "\"\" def serve(options): d = open(\"unit_template.html\") template = Template( d.read()", "zone in lot_area and area: m = max(area/(lot_area[zone]), 1) else:", "in ['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B',", "for row in c.execute(\"SELECT * FROM lots\"): t = unit_cap(row,", "far = {} return far, lot_area def table(options): far, lot_area", "options['lot_explicit'] = int(form['lot_explicit'][0]) if 'lot' in form: options['no_lot'] = True", "if 'twofamily' in form: options['no_b'] = True self.wfile.write(serve(options)) return def", "!= 1.0: changes.append('increase FAR maximums by a factor of %s'", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "= 1.75 for i in ['BC', 'O-2']: far[i] = 2.0", "['IA-2', 'IB']: far[i] = 4.0 far['BB-1'] = 3.25 far['SD-11'] =", "FROM lots\"): t = unit_cap(row, options=options) if t == -1:", "i in ['C-1', 'BA-3']: lot_area[i] = 1500 for i in", "Unless required by applicable law or agreed to in writing,", "i in ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5',", "'C-1A': 1000, 'BC': 500, 'BC-1': 450, 'IA-1': 700, 'SD-8': 650,", "'SD-2']: far[i] = 0.5 for i in ['C', 'SD-9', 'SD-10F',", "and not 'no_a' in options: return 1 #print row area", "True self.wfile.write(serve(options)) return def run(): try: #Create a web server", "= float(form[i][0]) else: options[i] = 1.0 if 'far_explicit' in form", "m = max(int(max_building/800), 1) if zone == \"B\" and not", "maximums to %s\" % options['far_explicit']) elif 'far_factor' in options and", "message form = parse_qs(urlparse(self.path).query) options = {} for i in", "3.25 far['SD-11'] = 1.7 far['SD-15'] = 3.5 lot_area = {", "the specific language governing permissions and # limitations under the", "in form: options['no_b'] = True self.wfile.write(serve(options)) return def run(): try:", "= area * far[zone] * 1 if max(int(max_building/800), 1) <", "for i in lot_area: if options and 'lot_explicit' in 
options:", "get_caps(options) zone = row['zone'] if (not zone.startswith(\"C\") and not zone", "changes.append('decrease lot size minimums by a factor of %s' %", "applicable law or agreed to in writing, software # distributed", "'BA', 'BA-2', 'SD-8']: far[i] = 1.75 for i in ['BC',", "and form['lot_explicit']: options['lot_explicit'] = int(form['lot_explicit'][0]) if 'lot' in form: options['no_lot']", "shutting down the web server' server.socket.close() if __name__ == \"__main__\":", "'SD-5']: far[i] = 1.25 for i in ['IA-1', 'IA', 'O-2A',", "d def compute_count(options = None): conn = sqlite3.connect(\"prop.db\") if options", "if 'lot_explicit' in form and form['lot_explicit']: options['lot_explicit'] = int(form['lot_explicit'][0]) if", "PORT_NUMBER = 8080 class myHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers()", "in ['far_factor', 'lot_factor']: if i in form: options[i] = float(form[i][0])", "for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d", "<reponame>fintelia/habitationi #!/usr/bin/python # Copyright 2019 <NAME> # # Licensed under", "{} for i in ['far_factor', 'lot_factor']: if i in form:", "lot_area[i] = 1800 for i in ['C-1', 'BA-3']: lot_area[i] =", "'SD-10H']: far[i] = 0.6 for i in ['C-1', 'BA-3', 'IB-2',", "in writing, software # distributed under the License is distributed", "data['table'] = table(options) data['options'] = options s = template.render(**data) return", "zoning in A-1 and A-2 zones') if 'no_b' in options:", "1) < m: m = max(int(max_building/800), 1) if zone ==", "in ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']: far[i] = 1.5 for", "return -1 if zone in ['A-1', 'A-2'] and not 'no_a'", "'BA-1']: lot_area[i] = 1200 for i in ['B', 'SD-2', 'SD-3']:", "% (i, far.get(i, \"\"), lot_area.get(i,\"\"))) return \"\\n\".join(table) def unit_cap(row, options=None):", "far[i] = 1.25 for i in ['IA-1', 'IA', 'O-2A', 'SD-4A',", "= 1800 for i in 
['C-1', 'BA-3']: lot_area[i] = 1500", "form: options[i] = float(form[i][0]) else: options[i] = 1.0 if 'far_explicit'", "form: options['no_lot'] = True if 'singlefamily' in form: options['no_a'] =", "in options: far = {} return far, lot_area def table(options):", "'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']: lot_area[i]", "options: changes.append('eliminate single family zoning in A-1 and A-2 zones')", "single family zoning in A-1 and A-2 zones') if 'no_b'", "'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']: lot_area[i] =", "in ['BC', 'O-2']: far[i] = 2.0 for i in ['C-2A']:", "in B zones') if 'far_explicit' in options: changes.append(\"set all FAR", "from jinja2 import Template import sqlite3 import urllib def get_caps(options):", "of %s' % options['far_factor']) if len(changes): return \", \".join(changes) else:", "max(int(max_building/800), 1) if zone == \"B\" and not 'no_b' in", "for i in ['B', 'SD-2', 'SD-3']: lot_area[i] = 2500 for", "options['far_factor']) if len(changes): return \", \".join(changes) else: return \"\" def", "and options['lot_factor'] != 1.0: changes.append('decrease lot size minimums by a", "(i, far.get(i, \"\"), lot_area.get(i,\"\"))) return \"\\n\".join(table) def unit_cap(row, options=None): if", "not 'no_b' in options: m = min(m, 2) return m", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "if t == -1: continue m += int(t) return m", "in options: changes.append('eliminate two-family zoning limits in B zones') if", "options['far_factor'] != 1.0: changes.append('increase FAR maximums by a factor of", "port ' , PORT_NUMBER #Wait forever for incoming htto requests", "License, Version 2.0 (the \"License\"); # you may not use", "= Template( d.read() ) unit_count = int(compute_count(options)) data = {}", "return m def describe(options): changes = [] if 'no_lot' in", "# You may obtain a copy of the License at", "family zoning in A-1 and A-2 zones') if 'no_b' in", "'BA-3', 'IB-2', 'O-1']: far[i] = 
.75 for i in ['BA-1',", "options = {} c = conn.cursor() c.row_factory = dict_factory m", "the handler to manage the #incoming request server = HTTPServer(('',", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "in lot_area and area: m = max(area/(lot_area[zone]), 1) else: m", "changes.append(\"set all lot size/unit minimums to %s\" % options['lot_explicit']) elif", "to %s\" % options['far_explicit']) elif 'far_factor' in options and options['far_factor']", "1.0: changes.append('increase FAR maximums by a factor of %s' %", "'SD-1', 'SD-6', 'SD-7']: far[i] = 3.0 for i in ['IA-2',", "= row[idx] return d def compute_count(options = None): conn =", "unit_cap(row, options=None): if not options: options = {} far, lot_area", "True if 'singlefamily' in form: options['no_a'] = True if 'twofamily'", "and A-2 zones') if 'no_b' in options: changes.append('eliminate two-family zoning", "d.read() ) unit_count = int(compute_count(options)) data = {} data['changes'] =", "def get_caps(options): far = {} for i in ['A-1', 'A-2',", "'O-3A', 'SD-1', 'SD-6', 'SD-7']: far[i] = 3.0 for i in", "in options: lot_area[i] = int(lot_area[i] / float(options['lot_factor'])) if 'no_lot' in", "0 current = 0 for row in c.execute(\"SELECT * FROM", "two-family zoning limits in B zones') if 'far_explicit' in options:", "and define the handler to manage the #incoming request server", "in options and options['far_factor'] != 1.0: changes.append('increase FAR maximums by", "the License for the specific language governing permissions and #", "if 'no_b' in options: changes.append('eliminate two-family zoning limits in B", "Apache License, Version 2.0 (the \"License\"); # you may not", "area: m = max(area/(lot_area[zone]), 1) else: m = 100000 max_building", "far = {} for i in ['A-1', 'A-2', 'B', 'SD-2']:", "the web server' server.socket.close() if __name__ == \"__main__\": print run()", "either express or implied. 
# See the License for the", "dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description):", "options and options['far_factor'] != 1.0: changes.append('increase FAR maximums by a", "lot size/unit minimums to %s\" % options['lot_explicit']) elif 'lot_factor' in", "1.7 far['SD-15'] = 3.5 lot_area = { 'A-1': 6000, 'A-2':", "[] if 'no_lot' in options: changes.append(\"eliminate lot size/unit minimums\") elif", "elif 'lot_explicit' in options: changes.append(\"set all lot size/unit minimums to", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "zone.startswith(\"C\") and not zone in (\"A-1\", \"A-2\", \"B\")) or zone", "i in ['A-1', 'A-2', 'B', 'SD-2']: far[i] = 0.5 for", "'O-2']: far[i] = 2.0 for i in ['C-2A']: far[i] =", "[] for i in ['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A',", "if 'no_far' in options: far = {} return far, lot_area", "for i in ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A',", "== \"CRDD\": return -1 if zone in ['A-1', 'A-2'] and", "far[i] * float(options['far_factor']) if 'no_far' in options: far = {}", "options: changes.append(\"set all lot size/unit minimums to %s\" % options['lot_explicit'])", "'SD-9']: lot_area[i] = 1800 for i in ['C-1', 'BA-3']: lot_area[i]", "serve(options): d = open(\"unit_template.html\") template = Template( d.read() ) unit_count", "i in ['IB-2', 'BA-1']: lot_area[i] = 1200 for i in", "m def describe(options): changes = [] if 'no_lot' in options:", "%s\" % options['far_explicit']) elif 'far_factor' in options and options['far_factor'] !=", "dict_factory m = 0 current = 0 for row in", "options: far[i] = far[i] * float(options['far_factor']) if 'no_far' in options:", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "'no_lot' in options: lot_area = {} for i in far:", "i in ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3',", "options = {} far, lot_area = get_caps(options) zone = row['zone']", "-1 if zone in ['A-1', 'A-2'] and not 
'no_a' in", "i in ['C', 'SD-9', 'SD-10F', 'SD-10H']: far[i] = 0.6 for", "form: options['no_a'] = True if 'twofamily' in form: options['no_b'] =", "= dict_factory m = 0 current = 0 for row", "server = HTTPServer(('', PORT_NUMBER), myHandler) print 'Started httpserver on port", "options['lot_factor']) if 'no_a' in options: changes.append('eliminate single family zoning in", "'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']: lot_area[i] = 300 for", "lot_area and area: m = max(area/(lot_area[zone]), 1) else: m =", "minimums by a factor of %s' % options['lot_factor']) if 'no_a'", "lot_area[i] = options['lot_explicit'] elif options and 'lot_factor' in options: lot_area[i]", "['far_factor', 'lot_factor']: if i in form: options[i] = float(form[i][0]) else:", "far: if options and 'far_explicit' in options: far[i] = options['far_explicit']", "request server = HTTPServer(('', PORT_NUMBER), myHandler) print 'Started httpserver on", "= 1.25 for i in ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']:", "= conn.cursor() c.row_factory = dict_factory m = 0 current =", "in options and options['lot_factor'] != 1.0: changes.append('decrease lot size minimums", "= 1.0 for i in ['C-1A', 'SD-5']: far[i] = 1.25", "class myHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # Send the", "parse_qs(urlparse(self.path).query) options = {} for i in ['far_factor', 'lot_factor']: if", "in ['A-1', 'A-2', 'B', 'SD-2']: far[i] = 0.5 for i", "options: far = {} return far, lot_area def table(options): far,", "return far, lot_area def table(options): far, lot_area = get_caps(options) table", "\"License\"); # you may not use this file except in", "jinja2 import Template import sqlite3 import urllib def get_caps(options): far", "'SD-7']: lot_area[i] = 300 for i in lot_area: if options", "and form['far_explicit']: options['far_explicit'] = float(form['far_explicit'][0]) if 'lot_explicit' in form and", "import sqlite3 import urllib 
def get_caps(options): far = {} for", "float(form['far_explicit'][0]) if 'lot_explicit' in form and form['lot_explicit']: options['lot_explicit'] = int(form['lot_explicit'][0])", "'A-1': 6000, 'A-2': 4500, 'C-1A': 1000, 'BC': 500, 'BC-1': 450,", "in form: options['no_lot'] = True if 'singlefamily' in form: options['no_a']", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "= get_caps(options) table = [] for i in ['A-1', 'A-2',", "1.0 for i in ['C-1A', 'SD-5']: far[i] = 1.25 for", "m: m = max(int(max_building/800), 1) if zone == \"B\" and", "'SD-6', 'SD-7']: lot_area[i] = 300 for i in lot_area: if", "lot_area.get(i,\"\"))) return \"\\n\".join(table) def unit_cap(row, options=None): if not options: options", "row): d = {} for idx, col in enumerate(cursor.description): d[col[0]]", "continue m += int(t) return m def describe(options): changes =", "myHandler) print 'Started httpserver on port ' , PORT_NUMBER #Wait", "in lot_area: if options and 'lot_explicit' in options: lot_area[i] =", "# distributed under the License is distributed on an \"AS", "far[i] = 4.0 far['BB-1'] = 3.25 far['SD-11'] = 1.7 far['SD-15']", "def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # Send the html message", "and options['far_factor'] != 1.0: changes.append('increase FAR maximums by a factor", "manage the #incoming request server = HTTPServer(('', PORT_NUMBER), myHandler) print", "# Unless required by applicable law or agreed to in", "'C-2B', 'C-3', 'C-3A', 'C-3B']: table.append(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" % (i, far.get(i, \"\"), lot_area.get(i,\"\")))", "and 'far_factor' in options: far[i] = far[i] * float(options['far_factor']) if", "['A-1', 'A-2'] and not 'no_a' in options: return 1 #print", "maximums by a factor of %s' % options['far_factor']) if len(changes):", "for i in ['far_factor', 'lot_factor']: if i in form: options[i]", "'lot' in form: options['no_lot'] = True if 'singlefamily' in form:", "2) 
return m def dict_factory(cursor, row): d = {} for", "lot_area[i] = 300 for i in lot_area: if options and", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "limits in B zones') if 'far_explicit' in options: changes.append(\"set all", "options: lot_area[i] = options['lot_explicit'] elif options and 'lot_factor' in options:", "html message form = parse_qs(urlparse(self.path).query) options = {} for i", "for i in ['A-1', 'A-2', 'B', 'SD-2']: far[i] = 0.5", "i in ['C-1A', 'SD-5']: far[i] = 1.25 for i in", "far, lot_area = get_caps(options) zone = row['zone'] if (not zone.startswith(\"C\")", "in ['A-1', 'A-2'] and not 'no_a' in options: return 1", "8080 class myHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # Send", "form = parse_qs(urlparse(self.path).query) options = {} for i in ['far_factor',", "You may obtain a copy of the License at #", "the #incoming request server = HTTPServer(('', PORT_NUMBER), myHandler) print 'Started", "'SD-13']: far[i] = 1.5 for i in ['C-2', 'C-2B', 'BA',", "def dict_factory(cursor, row): d = {} for idx, col in", "= row['zone'] if (not zone.startswith(\"C\") and not zone in (\"A-1\",", "= 3.25 far['SD-11'] = 1.7 far['SD-15'] = 3.5 lot_area =", "in ['C-1A', 'SD-5']: far[i] = 1.25 for i in ['IA-1',", "A-2 zones') if 'no_b' in options: changes.append('eliminate two-family zoning limits", "far[i] = options['far_explicit'] elif options and 'far_factor' in options: far[i]", "'SD-13']: lot_area[i] = 600 for i in ['C-2A', 'C-3', 'C-3A',", "'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']: far[i] = 3.0 for", "if zone == \"B\" and not 'no_b' in options: m", "the Apache License, Version 2.0 (the \"License\"); # you may", "urllib def get_caps(options): far = {} for i in ['A-1',", "= min(m, 2) return m def dict_factory(cursor, row): d =", "unit_count-37453 data['table'] = table(options) data['options'] = options s = template.render(**data)", "= True if 
'twofamily' in form: options['no_b'] = True self.wfile.write(serve(options))", "options['far_explicit'] elif options and 'far_factor' in options: far[i] = far[i]" ]
[]
[ "xmin = max(float(xcen) - float(w) / 2, 0) xmax =", "use file name as a unique image id # path=image_path,", "line.replace('\\n', '') if len(line.split(' ')) < 5: break classIndex, xcen,", "if len(line.split(' ')) < 5: break classIndex, xcen, ycen, w,", "1) ymin = max(float(ycen) - float(h) / 2, 0) ymax", "= int(height * ymin) ymax = int(height * ymax) location=(xmin,ymin,xmax,ymax)", "in lines: line = line.replace('\\n', '') if len(line.split(' ')) <", "lines = fp.readlines() for line in lines: line = line.replace('\\n',", "float(w) / 2, 0) xmax = min(float(xcen) + float(w) /", "ymax = int(height * ymax) location=(xmin,ymin,xmax,ymax) locations.append(location) print(locations) dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/' subset='val'", "= max(float(ycen) - float(h) / 2, 0) ymax = min(float(ycen)", "for image_path in glob.glob(imagesPattern): print(image_path) img = cv2.imread(image_path) height,width =", "= np.zeros([height, width, len(locations)], dtype=np.uint8) for index,location in enumerate(locations): x1,", "as a unique image id # path=image_path, # width=width, height=height,", "min(float(ycen) + float(h) / 2, 1) xmin = int(width *", "id # path=image_path, # width=width, height=height, # polygons=polygons) locationsFile='%s/%s.txt' %", "sys import json import datetime import numpy as np import", "cv2.imread(image_path) height,width = img.shape[:2] imageId=os.path.basename(image_path).replace('.jpg','') print(imageId) # # self.add_image( #", "xmin) xmax = int(width * xmax) ymin = int(height *", "location=(xmin,ymin,xmax,ymax) locations.append(location) print(locations) dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/' subset='val' load_cmk(dataset_dir=dataset_dir,subset=subset) locations=[(2,3,5,7),(8,8,9,9)] height=10 width=10 #", "+ float(w) / 2, 1) ymin = max(float(ycen) - float(h)", "5: break classIndex, xcen, ycen, w, h = line.strip().split(' ')", "# use file name as a unique image id #", 
"locationToMask(locations=None,height=None,width=None): mask = np.zeros([height, width, len(locations)], dtype=np.uint8) for index,location in", "in enumerate(locations): x1, y1, x2, y2 = location mask[y1:y2+1,x1:x2+1,index]=1 print(mask[:,:,index])", "import Image as pil_image import cv2 import cv2 def locationToMask(locations=None,height=None,width=None):", "line in lines: line = line.replace('\\n', '') if len(line.split(' '))", "import datetime import numpy as np import glob import skimage", "datetime import numpy as np import glob import skimage from", "folder=os.path.join(dataset_dir, subset) imagesPattern=folder+'/*.jpg' for image_path in glob.glob(imagesPattern): print(image_path) img =", "return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) def load_cmk(dataset_dir, subset): folder=os.path.join(dataset_dir, subset) imagesPattern=folder+'/*.jpg'", "0) xmax = min(float(xcen) + float(w) / 2, 1) ymin", "import numpy as np import glob import skimage from PIL", "<filename>samples/cmk/test.py import os import sys import json import datetime import", "a unique image id # path=image_path, # width=width, height=height, #", "xmin = int(width * xmin) xmax = int(width * xmax)", "print(mask[:,:,index]) return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) def load_cmk(dataset_dir, subset): folder=os.path.join(dataset_dir, subset)", "image_path in glob.glob(imagesPattern): print(image_path) img = cv2.imread(image_path) height,width = img.shape[:2]", "print(imageId) # # self.add_image( # \"balloon\", # image_id=a['filename'], # use", "dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/' subset='val' load_cmk(dataset_dir=dataset_dir,subset=subset) locations=[(2,3,5,7),(8,8,9,9)] height=10 width=10 # mask,classIds=locationToMask(locations=locations,height=height,width=width) # print(mask)", "y1, x2, y2 = location mask[y1:y2+1,x1:x2+1,index]=1 print(mask[:,:,index]) return mask.astype(np.bool), np.ones([mask.shape[-1]],", 
"(folder,imageId) locations=[] with open(locationsFile) as fp: lines = fp.readlines() for", "min(float(xcen) + float(w) / 2, 1) ymin = max(float(ycen) -", "def load_cmk(dataset_dir, subset): folder=os.path.join(dataset_dir, subset) imagesPattern=folder+'/*.jpg' for image_path in glob.glob(imagesPattern):", "load_cmk(dataset_dir=dataset_dir,subset=subset) locations=[(2,3,5,7),(8,8,9,9)] height=10 width=10 # mask,classIds=locationToMask(locations=locations,height=height,width=width) # print(mask) # print(classIds)", "image_id=a['filename'], # use file name as a unique image id", "json import datetime import numpy as np import glob import", "2, 0) ymax = min(float(ycen) + float(h) / 2, 1)", "import cv2 import cv2 def locationToMask(locations=None,height=None,width=None): mask = np.zeros([height, width,", "file name as a unique image id # path=image_path, #", "classIndex, xcen, ycen, w, h = line.strip().split(' ') xmin =", "= line.strip().split(' ') xmin = max(float(xcen) - float(w) / 2,", "/ 2, 0) xmax = min(float(xcen) + float(w) / 2,", "len(locations)], dtype=np.uint8) for index,location in enumerate(locations): x1, y1, x2, y2", "as pil_image import cv2 import cv2 def locationToMask(locations=None,height=None,width=None): mask =", "height=height, # polygons=polygons) locationsFile='%s/%s.txt' % (folder,imageId) locations=[] with open(locationsFile) as", "print(locations) dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/' subset='val' load_cmk(dataset_dir=dataset_dir,subset=subset) locations=[(2,3,5,7),(8,8,9,9)] height=10 width=10 # mask,classIds=locationToMask(locations=locations,height=height,width=width) #", "name as a unique image id # path=image_path, # width=width,", "pil_image import cv2 import cv2 def locationToMask(locations=None,height=None,width=None): mask = np.zeros([height,", "location mask[y1:y2+1,x1:x2+1,index]=1 print(mask[:,:,index]) return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) def load_cmk(dataset_dir, 
subset):", "< 5: break classIndex, xcen, ycen, w, h = line.strip().split('", "np import glob import skimage from PIL import Image as", "= fp.readlines() for line in lines: line = line.replace('\\n', '')", "for index,location in enumerate(locations): x1, y1, x2, y2 = location", "lines: line = line.replace('\\n', '') if len(line.split(' ')) < 5:", "ymax = min(float(ycen) + float(h) / 2, 1) xmin =", "from PIL import Image as pil_image import cv2 import cv2", "/ 2, 0) ymax = min(float(ycen) + float(h) / 2,", "= location mask[y1:y2+1,x1:x2+1,index]=1 print(mask[:,:,index]) return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) def load_cmk(dataset_dir,", "max(float(ycen) - float(h) / 2, 0) ymax = min(float(ycen) +", "xmax) ymin = int(height * ymin) ymax = int(height *", "img = cv2.imread(image_path) height,width = img.shape[:2] imageId=os.path.basename(image_path).replace('.jpg','') print(imageId) # #", "np.ones([mask.shape[-1]], dtype=np.int32) def load_cmk(dataset_dir, subset): folder=os.path.join(dataset_dir, subset) imagesPattern=folder+'/*.jpg' for image_path", "import json import datetime import numpy as np import glob", "% (folder,imageId) locations=[] with open(locationsFile) as fp: lines = fp.readlines()", "def locationToMask(locations=None,height=None,width=None): mask = np.zeros([height, width, len(locations)], dtype=np.uint8) for index,location", "unique image id # path=image_path, # width=width, height=height, # polygons=polygons)", "open(locationsFile) as fp: lines = fp.readlines() for line in lines:", "dtype=np.uint8) for index,location in enumerate(locations): x1, y1, x2, y2 =", "= max(float(xcen) - float(w) / 2, 0) xmax = min(float(xcen)", "Image as pil_image import cv2 import cv2 def locationToMask(locations=None,height=None,width=None): mask", "imagesPattern=folder+'/*.jpg' for image_path in glob.glob(imagesPattern): print(image_path) img = cv2.imread(image_path) height,width", "') xmin = max(float(xcen) - float(w) / 2, 0) xmax", 
"import os import sys import json import datetime import numpy", "enumerate(locations): x1, y1, x2, y2 = location mask[y1:y2+1,x1:x2+1,index]=1 print(mask[:,:,index]) return", "load_cmk(dataset_dir, subset): folder=os.path.join(dataset_dir, subset) imagesPattern=folder+'/*.jpg' for image_path in glob.glob(imagesPattern): print(image_path)", "as fp: lines = fp.readlines() for line in lines: line", "imageId=os.path.basename(image_path).replace('.jpg','') print(imageId) # # self.add_image( # \"balloon\", # image_id=a['filename'], #", "dtype=np.int32) def load_cmk(dataset_dir, subset): folder=os.path.join(dataset_dir, subset) imagesPattern=folder+'/*.jpg' for image_path in", "# \"balloon\", # image_id=a['filename'], # use file name as a", "2, 0) xmax = min(float(xcen) + float(w) / 2, 1)", "2, 1) xmin = int(width * xmin) xmax = int(width", "mask[y1:y2+1,x1:x2+1,index]=1 print(mask[:,:,index]) return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) def load_cmk(dataset_dir, subset): folder=os.path.join(dataset_dir,", "height,width = img.shape[:2] imageId=os.path.basename(image_path).replace('.jpg','') print(imageId) # # self.add_image( # \"balloon\",", "- float(h) / 2, 0) ymax = min(float(ycen) + float(h)", "len(line.split(' ')) < 5: break classIndex, xcen, ycen, w, h", "ymin = max(float(ycen) - float(h) / 2, 0) ymax =", "1) xmin = int(width * xmin) xmax = int(width *", "# image_id=a['filename'], # use file name as a unique image", "- float(w) / 2, 0) xmax = min(float(xcen) + float(w)", "\"balloon\", # image_id=a['filename'], # use file name as a unique", "line = line.replace('\\n', '') if len(line.split(' ')) < 5: break", "mask = np.zeros([height, width, len(locations)], dtype=np.uint8) for index,location in enumerate(locations):", "0) ymax = min(float(ycen) + float(h) / 2, 1) xmin", "line.strip().split(' ') xmin = max(float(xcen) - float(w) / 2, 0)", "= img.shape[:2] imageId=os.path.basename(image_path).replace('.jpg','') print(imageId) # # 
self.add_image( # \"balloon\", #", "xmax = int(width * xmax) ymin = int(height * ymin)", "* ymin) ymax = int(height * ymax) location=(xmin,ymin,xmax,ymax) locations.append(location) print(locations)", "image id # path=image_path, # width=width, height=height, # polygons=polygons) locationsFile='%s/%s.txt'", "= cv2.imread(image_path) height,width = img.shape[:2] imageId=os.path.basename(image_path).replace('.jpg','') print(imageId) # # self.add_image(", "glob import skimage from PIL import Image as pil_image import", "# self.add_image( # \"balloon\", # image_id=a['filename'], # use file name", "int(width * xmax) ymin = int(height * ymin) ymax =", "# polygons=polygons) locationsFile='%s/%s.txt' % (folder,imageId) locations=[] with open(locationsFile) as fp:", "int(width * xmin) xmax = int(width * xmax) ymin =", "index,location in enumerate(locations): x1, y1, x2, y2 = location mask[y1:y2+1,x1:x2+1,index]=1", "skimage from PIL import Image as pil_image import cv2 import", "= min(float(ycen) + float(h) / 2, 1) xmin = int(width", "import glob import skimage from PIL import Image as pil_image", "in glob.glob(imagesPattern): print(image_path) img = cv2.imread(image_path) height,width = img.shape[:2] imageId=os.path.basename(image_path).replace('.jpg','')", "fp.readlines() for line in lines: line = line.replace('\\n', '') if", "= line.replace('\\n', '') if len(line.split(' ')) < 5: break classIndex,", "import skimage from PIL import Image as pil_image import cv2", "2, 1) ymin = max(float(ycen) - float(h) / 2, 0)", "+ float(h) / 2, 1) xmin = int(width * xmin)", "ymax) location=(xmin,ymin,xmax,ymax) locations.append(location) print(locations) dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/' subset='val' load_cmk(dataset_dir=dataset_dir,subset=subset) locations=[(2,3,5,7),(8,8,9,9)] height=10 width=10", "width, len(locations)], dtype=np.uint8) for index,location in enumerate(locations): x1, y1, x2,", "cv2 def 
locationToMask(locations=None,height=None,width=None): mask = np.zeros([height, width, len(locations)], dtype=np.uint8) for", "import cv2 def locationToMask(locations=None,height=None,width=None): mask = np.zeros([height, width, len(locations)], dtype=np.uint8)", "w, h = line.strip().split(' ') xmin = max(float(xcen) - float(w)", "float(h) / 2, 1) xmin = int(width * xmin) xmax", "max(float(xcen) - float(w) / 2, 0) xmax = min(float(xcen) +", "ymin) ymax = int(height * ymax) location=(xmin,ymin,xmax,ymax) locations.append(location) print(locations) dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/'", "cv2 import cv2 def locationToMask(locations=None,height=None,width=None): mask = np.zeros([height, width, len(locations)],", "/ 2, 1) ymin = max(float(ycen) - float(h) / 2,", "')) < 5: break classIndex, xcen, ycen, w, h =", "* xmax) ymin = int(height * ymin) ymax = int(height", "for line in lines: line = line.replace('\\n', '') if len(line.split('", "int(height * ymin) ymax = int(height * ymax) location=(xmin,ymin,xmax,ymax) locations.append(location)", "x2, y2 = location mask[y1:y2+1,x1:x2+1,index]=1 print(mask[:,:,index]) return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)", "os import sys import json import datetime import numpy as", "int(height * ymax) location=(xmin,ymin,xmax,ymax) locations.append(location) print(locations) dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/' subset='val' load_cmk(dataset_dir=dataset_dir,subset=subset) locations=[(2,3,5,7),(8,8,9,9)]", "/ 2, 1) xmin = int(width * xmin) xmax =", "as np import glob import skimage from PIL import Image", "= int(height * ymax) location=(xmin,ymin,xmax,ymax) locations.append(location) print(locations) dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/' subset='val' load_cmk(dataset_dir=dataset_dir,subset=subset)", "locationsFile='%s/%s.txt' % (folder,imageId) locations=[] with open(locationsFile) as fp: lines =", "glob.glob(imagesPattern): 
print(image_path) img = cv2.imread(image_path) height,width = img.shape[:2] imageId=os.path.basename(image_path).replace('.jpg','') print(imageId)", "import sys import json import datetime import numpy as np", "x1, y1, x2, y2 = location mask[y1:y2+1,x1:x2+1,index]=1 print(mask[:,:,index]) return mask.astype(np.bool),", "ycen, w, h = line.strip().split(' ') xmin = max(float(xcen) -", "float(w) / 2, 1) ymin = max(float(ycen) - float(h) /", "* ymax) location=(xmin,ymin,xmax,ymax) locations.append(location) print(locations) dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/' subset='val' load_cmk(dataset_dir=dataset_dir,subset=subset) locations=[(2,3,5,7),(8,8,9,9)] height=10", "ymin = int(height * ymin) ymax = int(height * ymax)", "img.shape[:2] imageId=os.path.basename(image_path).replace('.jpg','') print(imageId) # # self.add_image( # \"balloon\", # image_id=a['filename'],", "locations=[] with open(locationsFile) as fp: lines = fp.readlines() for line", "# width=width, height=height, # polygons=polygons) locationsFile='%s/%s.txt' % (folder,imageId) locations=[] with", "np.zeros([height, width, len(locations)], dtype=np.uint8) for index,location in enumerate(locations): x1, y1,", "= int(width * xmin) xmax = int(width * xmax) ymin", "with open(locationsFile) as fp: lines = fp.readlines() for line in", "path=image_path, # width=width, height=height, # polygons=polygons) locationsFile='%s/%s.txt' % (folder,imageId) locations=[]", "= int(width * xmax) ymin = int(height * ymin) ymax", "y2 = location mask[y1:y2+1,x1:x2+1,index]=1 print(mask[:,:,index]) return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) def", "# # self.add_image( # \"balloon\", # image_id=a['filename'], # use file", "subset): folder=os.path.join(dataset_dir, subset) imagesPattern=folder+'/*.jpg' for image_path in glob.glob(imagesPattern): print(image_path) img", "h = line.strip().split(' ') xmin = max(float(xcen) - float(w) /", "xmax = min(float(xcen) + float(w) / 2, 1) ymin 
=", "width=width, height=height, # polygons=polygons) locationsFile='%s/%s.txt' % (folder,imageId) locations=[] with open(locationsFile)", "numpy as np import glob import skimage from PIL import", "mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) def load_cmk(dataset_dir, subset): folder=os.path.join(dataset_dir, subset) imagesPattern=folder+'/*.jpg' for", "break classIndex, xcen, ycen, w, h = line.strip().split(' ') xmin", "fp: lines = fp.readlines() for line in lines: line =", "print(image_path) img = cv2.imread(image_path) height,width = img.shape[:2] imageId=os.path.basename(image_path).replace('.jpg','') print(imageId) #", "subset='val' load_cmk(dataset_dir=dataset_dir,subset=subset) locations=[(2,3,5,7),(8,8,9,9)] height=10 width=10 # mask,classIds=locationToMask(locations=locations,height=height,width=width) # print(mask) #", "'') if len(line.split(' ')) < 5: break classIndex, xcen, ycen,", "polygons=polygons) locationsFile='%s/%s.txt' % (folder,imageId) locations=[] with open(locationsFile) as fp: lines", "* xmin) xmax = int(width * xmax) ymin = int(height", "# path=image_path, # width=width, height=height, # polygons=polygons) locationsFile='%s/%s.txt' % (folder,imageId)", "self.add_image( # \"balloon\", # image_id=a['filename'], # use file name as", "float(h) / 2, 0) ymax = min(float(ycen) + float(h) /", "PIL import Image as pil_image import cv2 import cv2 def", "locations.append(location) print(locations) dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/' subset='val' load_cmk(dataset_dir=dataset_dir,subset=subset) locations=[(2,3,5,7),(8,8,9,9)] height=10 width=10 # mask,classIds=locationToMask(locations=locations,height=height,width=width)", "subset) imagesPattern=folder+'/*.jpg' for image_path in glob.glob(imagesPattern): print(image_path) img = cv2.imread(image_path)", "= min(float(xcen) + float(w) / 2, 1) ymin = max(float(ycen)", "xcen, ycen, w, h = line.strip().split(' ') xmin = max(float(xcen)" ]
[ "u = soup.select('#u1 a') for i in u: print(\"名称:%s,地址:%s\" %", "soup.prettify(); u = soup.select('#u1 a') for i in u: print(\"名称:%s,地址:%s\"", "'http://www.baidu.com'; with requests.get(url) as r: r.encoding='utf-8' soup = BeautifulSoup(r.text) #格式化", "#Author:Winston.Wang import requests from bs4 import BeautifulSoup print(dir(BeautifulSoup)) url =", "r.encoding='utf-8' soup = BeautifulSoup(r.text) #格式化 pret = soup.prettify(); u =", "pret = soup.prettify(); u = soup.select('#u1 a') for i in", "import BeautifulSoup print(dir(BeautifulSoup)) url = 'http://www.baidu.com'; with requests.get(url) as r:", "soup = BeautifulSoup(r.text) #格式化 pret = soup.prettify(); u = soup.select('#u1", "url = 'http://www.baidu.com'; with requests.get(url) as r: r.encoding='utf-8' soup =", "r: r.encoding='utf-8' soup = BeautifulSoup(r.text) #格式化 pret = soup.prettify(); u", "print(dir(BeautifulSoup)) url = 'http://www.baidu.com'; with requests.get(url) as r: r.encoding='utf-8' soup", "#格式化 pret = soup.prettify(); u = soup.select('#u1 a') for i", "requests.get(url) as r: r.encoding='utf-8' soup = BeautifulSoup(r.text) #格式化 pret =", "-*- #Author:Winston.Wang import requests from bs4 import BeautifulSoup print(dir(BeautifulSoup)) url", "= soup.select('#u1 a') for i in u: print(\"名称:%s,地址:%s\" % (i.getText(),i.get('href')))", "as r: r.encoding='utf-8' soup = BeautifulSoup(r.text) #格式化 pret = soup.prettify();", "from bs4 import BeautifulSoup print(dir(BeautifulSoup)) url = 'http://www.baidu.com'; with requests.get(url)", "# -*- coding: utf-8 -*- #Author:Winston.Wang import requests from bs4", "BeautifulSoup(r.text) #格式化 pret = soup.prettify(); u = soup.select('#u1 a') for", "utf-8 -*- #Author:Winston.Wang import requests from bs4 import BeautifulSoup print(dir(BeautifulSoup))", "with requests.get(url) as r: r.encoding='utf-8' soup = BeautifulSoup(r.text) #格式化 pret", "import requests from bs4 import BeautifulSoup print(dir(BeautifulSoup)) url = 'http://www.baidu.com';", "= 
BeautifulSoup(r.text) #格式化 pret = soup.prettify(); u = soup.select('#u1 a')", "BeautifulSoup print(dir(BeautifulSoup)) url = 'http://www.baidu.com'; with requests.get(url) as r: r.encoding='utf-8'", "coding: utf-8 -*- #Author:Winston.Wang import requests from bs4 import BeautifulSoup", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- #Author:Winston.Wang import requests", "-*- coding: utf-8 -*- #Author:Winston.Wang import requests from bs4 import", "python3 # -*- coding: utf-8 -*- #Author:Winston.Wang import requests from", "= 'http://www.baidu.com'; with requests.get(url) as r: r.encoding='utf-8' soup = BeautifulSoup(r.text)", "bs4 import BeautifulSoup print(dir(BeautifulSoup)) url = 'http://www.baidu.com'; with requests.get(url) as", "requests from bs4 import BeautifulSoup print(dir(BeautifulSoup)) url = 'http://www.baidu.com'; with", "= soup.prettify(); u = soup.select('#u1 a') for i in u:" ]
[ "views.newsView, name=\"home\"), path(\"createBlog\", views.CreateBlogView.as_view(), name=\"createBlog\"), path(\"myBlogs\", views.PostListView.as_view(), name=\"myBlogs\"), path(\"single/<int:pk>\", views.PostDetailView.as_view(),", "import views urlpatterns = [ path(\"\", views.newsView, name=\"home\"), path(\"createBlog\", views.CreateBlogView.as_view(),", "views urlpatterns = [ path(\"\", views.newsView, name=\"home\"), path(\"createBlog\", views.CreateBlogView.as_view(), name=\"createBlog\"),", "path(\"\", views.newsView, name=\"home\"), path(\"createBlog\", views.CreateBlogView.as_view(), name=\"createBlog\"), path(\"myBlogs\", views.PostListView.as_view(), name=\"myBlogs\"), path(\"single/<int:pk>\",", "path(\"subscribe\", views.subscribeView,name=\"subscribe\"), path(\"about\", views.aboutView, name=\"about\"), path(\"edit/<int:pk>\", views.UpdateBlogView.as_view(), name=\"edit\"), path(\"delete/<int:pk>\", views.DeleteBlogView.as_view(),", "views.PostListView.as_view(), name=\"myBlogs\"), path(\"single/<int:pk>\", views.PostDetailView.as_view(), name=\"single\"), path(\"subscribe\", views.subscribeView,name=\"subscribe\"), path(\"about\", views.aboutView, name=\"about\"),", "views.LikeView, name=\"like_post\"), # API urls for superuser path(\"api/create/\", views.APICreateView.as_view()), path(\"api/posts/\",", "<gh_stars>1-10 from django.urls import path, include from . import views", "path(\"like/<int:pk>\", views.LikeView, name=\"like_post\"), # API urls for superuser path(\"api/create/\", views.APICreateView.as_view()),", "include from . import views urlpatterns = [ path(\"\", views.newsView,", "from django.urls import path, include from . import views urlpatterns", "from . 
import views urlpatterns = [ path(\"\", views.newsView, name=\"home\"),", "urlpatterns = [ path(\"\", views.newsView, name=\"home\"), path(\"createBlog\", views.CreateBlogView.as_view(), name=\"createBlog\"), path(\"myBlogs\",", "name=\"like_post\"), # API urls for superuser path(\"api/create/\", views.APICreateView.as_view()), path(\"api/posts/\", views.APIListView.as_view()),", "name=\"edit\"), path(\"delete/<int:pk>\", views.DeleteBlogView.as_view(), name=\"delete\"), path(\"like/<int:pk>\", views.LikeView, name=\"like_post\"), # API urls", ". import views urlpatterns = [ path(\"\", views.newsView, name=\"home\"), path(\"createBlog\",", "name=\"myBlogs\"), path(\"single/<int:pk>\", views.PostDetailView.as_view(), name=\"single\"), path(\"subscribe\", views.subscribeView,name=\"subscribe\"), path(\"about\", views.aboutView, name=\"about\"), path(\"edit/<int:pk>\",", "path(\"about\", views.aboutView, name=\"about\"), path(\"edit/<int:pk>\", views.UpdateBlogView.as_view(), name=\"edit\"), path(\"delete/<int:pk>\", views.DeleteBlogView.as_view(), name=\"delete\"), path(\"like/<int:pk>\",", "views.UpdateBlogView.as_view(), name=\"edit\"), path(\"delete/<int:pk>\", views.DeleteBlogView.as_view(), name=\"delete\"), path(\"like/<int:pk>\", views.LikeView, name=\"like_post\"), # API", "name=\"delete\"), path(\"like/<int:pk>\", views.LikeView, name=\"like_post\"), # API urls for superuser path(\"api/create/\",", "name=\"home\"), path(\"createBlog\", views.CreateBlogView.as_view(), name=\"createBlog\"), path(\"myBlogs\", views.PostListView.as_view(), name=\"myBlogs\"), path(\"single/<int:pk>\", views.PostDetailView.as_view(), name=\"single\"),", "views.aboutView, name=\"about\"), path(\"edit/<int:pk>\", views.UpdateBlogView.as_view(), name=\"edit\"), path(\"delete/<int:pk>\", views.DeleteBlogView.as_view(), name=\"delete\"), path(\"like/<int:pk>\", views.LikeView,", "# API urls for superuser path(\"api/create/\", views.APICreateView.as_view()), path(\"api/posts/\", 
views.APIListView.as_view()), path(\"api/posts/<int:pk>\",", "path(\"edit/<int:pk>\", views.UpdateBlogView.as_view(), name=\"edit\"), path(\"delete/<int:pk>\", views.DeleteBlogView.as_view(), name=\"delete\"), path(\"like/<int:pk>\", views.LikeView, name=\"like_post\"), #", "[ path(\"\", views.newsView, name=\"home\"), path(\"createBlog\", views.CreateBlogView.as_view(), name=\"createBlog\"), path(\"myBlogs\", views.PostListView.as_view(), name=\"myBlogs\"),", "path, include from . import views urlpatterns = [ path(\"\",", "path(\"myBlogs\", views.PostListView.as_view(), name=\"myBlogs\"), path(\"single/<int:pk>\", views.PostDetailView.as_view(), name=\"single\"), path(\"subscribe\", views.subscribeView,name=\"subscribe\"), path(\"about\", views.aboutView,", "name=\"single\"), path(\"subscribe\", views.subscribeView,name=\"subscribe\"), path(\"about\", views.aboutView, name=\"about\"), path(\"edit/<int:pk>\", views.UpdateBlogView.as_view(), name=\"edit\"), path(\"delete/<int:pk>\",", "API urls for superuser path(\"api/create/\", views.APICreateView.as_view()), path(\"api/posts/\", views.APIListView.as_view()), path(\"api/posts/<int:pk>\", views.APIDetailView.as_view()),", "path(\"delete/<int:pk>\", views.DeleteBlogView.as_view(), name=\"delete\"), path(\"like/<int:pk>\", views.LikeView, name=\"like_post\"), # API urls for", "views.CreateBlogView.as_view(), name=\"createBlog\"), path(\"myBlogs\", views.PostListView.as_view(), name=\"myBlogs\"), path(\"single/<int:pk>\", views.PostDetailView.as_view(), name=\"single\"), path(\"subscribe\", views.subscribeView,name=\"subscribe\"),", "views.PostDetailView.as_view(), name=\"single\"), path(\"subscribe\", views.subscribeView,name=\"subscribe\"), path(\"about\", views.aboutView, name=\"about\"), path(\"edit/<int:pk>\", views.UpdateBlogView.as_view(), name=\"edit\"),", "views.subscribeView,name=\"subscribe\"), path(\"about\", views.aboutView, name=\"about\"), path(\"edit/<int:pk>\", views.UpdateBlogView.as_view(), 
name=\"edit\"), path(\"delete/<int:pk>\", views.DeleteBlogView.as_view(), name=\"delete\"),", "urls for superuser path(\"api/create/\", views.APICreateView.as_view()), path(\"api/posts/\", views.APIListView.as_view()), path(\"api/posts/<int:pk>\", views.APIDetailView.as_view()), ]", "= [ path(\"\", views.newsView, name=\"home\"), path(\"createBlog\", views.CreateBlogView.as_view(), name=\"createBlog\"), path(\"myBlogs\", views.PostListView.as_view(),", "import path, include from . import views urlpatterns = [", "views.DeleteBlogView.as_view(), name=\"delete\"), path(\"like/<int:pk>\", views.LikeView, name=\"like_post\"), # API urls for superuser", "path(\"single/<int:pk>\", views.PostDetailView.as_view(), name=\"single\"), path(\"subscribe\", views.subscribeView,name=\"subscribe\"), path(\"about\", views.aboutView, name=\"about\"), path(\"edit/<int:pk>\", views.UpdateBlogView.as_view(),", "django.urls import path, include from . import views urlpatterns =", "name=\"about\"), path(\"edit/<int:pk>\", views.UpdateBlogView.as_view(), name=\"edit\"), path(\"delete/<int:pk>\", views.DeleteBlogView.as_view(), name=\"delete\"), path(\"like/<int:pk>\", views.LikeView, name=\"like_post\"),", "path(\"createBlog\", views.CreateBlogView.as_view(), name=\"createBlog\"), path(\"myBlogs\", views.PostListView.as_view(), name=\"myBlogs\"), path(\"single/<int:pk>\", views.PostDetailView.as_view(), name=\"single\"), path(\"subscribe\",", "name=\"createBlog\"), path(\"myBlogs\", views.PostListView.as_view(), name=\"myBlogs\"), path(\"single/<int:pk>\", views.PostDetailView.as_view(), name=\"single\"), path(\"subscribe\", views.subscribeView,name=\"subscribe\"), path(\"about\"," ]
[ "if j == \"^\": isSquared = True unitPreIndex = ''.join(list(i)[:ind])", "\"cd\", \"sr\", \"rad\"] self.derivedUnits = [\"Hz\", \"N\", \"Pa\", \"J\", \"W\",", "your unit to it's base unit: %s.\" % (self.idPrefix, i))", "base unit if(i in (self.baseUnits or self.derivedUnits) and isSquared ==", "break else: #append in case for base unit break #Appends", "_purpose_ = \"Sets up the unit class\" class Unit: '''This", "%s and converted your unit to it's base unit: %s.\"", "checks for squared variables while(i not in (self.baseUnits or self.derivedUnits)", "specified by the carat for l in range (intReps): if(''.join(toAppend)", "\"A\", \"s\", \"K\", \"mol\", \"cd\", \"sr\", \"rad\"] self.derivedUnits = [\"Hz\",", "in (self.baseUnits and self.derivedUnits)): #append in case for special units", "it has a carat in the expression for ind, j", "we will add it to the product regardless.\" % ''.join(toAppend))", "enumerate(list(i)): if j == \"^\": isSquared = True unitPreIndex =", "print(\"Your variable %s was not in the commonly used units", "used units OR it is a derived unit such as", "isSquared == False): converted.append(i) elif(isSquared == True): toAppend = []", "number of times the unit is squared for index, val", "converted.append(''.join(toAppend)) #Exception for special units else: print(\"Your variable %s was", "unit to base unit and checks for squared variables while(i", "in (self.baseUnits or self.derivedUnits) and len(unitPreIndex) != 1): orgNameList =", "val == \"^\": numStart = index+1 numReps.append(''.join(list(i)[numStart:])) toAppend.append(''.join(list(i)[:index])) break #convert", "(self.baseUnits or self.derivedUnits) and isSquared == False): converted.append(i) elif(isSquared ==", "unit if(i not in (self.baseUnits and self.derivedUnits)): #append in case", "class\" class Unit: '''This is a class of lists''' def", "or self.derivedUnits) and len(list(i)) != 1 and unitPreIndex not in", "number of units specified by the carat for l in", "newtons -- we 
will add it to the product regardless.\"", "lists''' def __init__(self): self.baseUnits = [\"m\", \"kg\", \"A\", \"s\", \"K\",", "case for base unit break #Appends base unit if(i in", "1): orgNameList = list(i) #identify prefix removed self.idPrefix = orgNameList.pop(0)", "== False): converted.append(i) elif(isSquared == True): toAppend = [] numReps", "index, val in enumerate(list(i)): if val == \"^\": numStart =", "%s was not in the commonly used units OR it", "orgNameList = list(i) #identify prefix removed self.idPrefix = orgNameList.pop(0) i", "unit: %s.\" % (self.idPrefix, i)) #checks if it is a", "self.derivedUnits = [\"Hz\", \"N\", \"Pa\", \"J\", \"W\", \"C\", \"V\", \"F\",", "= [\"m\", \"kg\", \"A\", \"s\", \"K\", \"mol\", \"cd\", \"sr\", \"rad\"]", "\"ohm\", \"S\", \"Wb\", \"T\", \"H\", \"°C\", \"lm\", \"lx\", \"Bq\", \"Gy\",", "self.derivedUnits) and len(unitPreIndex) != 1): orgNameList = list(i) #identify prefix", "units specified by the carat for l in range (intReps):", "\"T\", \"H\", \"°C\", \"lm\", \"lx\", \"Bq\", \"Gy\", \"Sv\", \"kat\"] def", "%s.\" % (self.idPrefix, i)) #checks if it is a special", "= int(''.join(numReps)) #append number of units specified by the carat", "#run once to get number of times the unit is", "and converted your unit to it's base unit: %s.\" %", "variable %s was not in the commonly used units OR", "not in (self.baseUnits and self.derivedUnits)): #append in case for special", "ind, j in enumerate(list(i)): if j == \"^\": isSquared =", "#identify prefix removed self.idPrefix = orgNameList.pop(0) i = ''.join(orgNameList) print(\"The", "orgNameList.pop(0) i = ''.join(orgNameList) print(\"The program removed the prefix %s", "not in (self.baseUnits or self.derivedUnits) and len(list(i)) != 1 and", "(self.baseUnits or self.derivedUnits) and len(list(i)) != 1 and unitPreIndex not", "unitPreIndex not in (self.baseUnits or self.derivedUnits) and len(unitPreIndex) != 1):", "= list(i) #identify prefix removed self.idPrefix = 
orgNameList.pop(0) i =", "#checks if it has a carat in the expression for", "program removed the prefix %s and converted your unit to", "and self.derivedUnits)): #append in case for special units break else:", "OR it is a derived unit such as N, newtons", "the product regardless.\" % ''.join(toAppend)) converted.append(''.join(toAppend)) #Exception for special units", "\"Pa\", \"J\", \"W\", \"C\", \"V\", \"F\", \"ohm\", \"S\", \"Wb\", \"T\",", "once to get number of times the unit is squared", "''.join(orgNameList) print(\"The program removed the prefix %s and converted your", "'''This is a class of lists''' def __init__(self): self.baseUnits =", "\"S\", \"Wb\", \"T\", \"H\", \"°C\", \"lm\", \"lx\", \"Bq\", \"Gy\", \"Sv\",", "in the expression for ind, j in enumerate(list(i)): if j", "converted.append(i) elif(isSquared == True): toAppend = [] numReps = []", "of times the unit is squared for index, val in", "special units else: print(\"Your variable %s was not in the", "\"^\": isSquared = True unitPreIndex = ''.join(list(i)[:ind]) break #converts non-unary", "userList): '''Converts elements in str list to base units''' converted", "for squared variables while(i not in (self.baseUnits or self.derivedUnits) and", "enumerate(list(i)): if val == \"^\": numStart = index+1 numReps.append(''.join(list(i)[numStart:])) toAppend.append(''.join(list(i)[:index]))", "list(i) #identify prefix removed self.idPrefix = orgNameList.pop(0) i = ''.join(orgNameList)", "!= 1 and unitPreIndex not in (self.baseUnits or self.derivedUnits) and", "% ''.join(toAppend)) converted.append(''.join(toAppend)) #Exception for special units else: print(\"Your variable", "was not in the commonly used units OR it is", "else: print(\"Your variable %s was not in the commonly used", "range (intReps): if(''.join(toAppend) not in (self.baseUnits or self.derivedUnits)): print(\"Your variable", "to it's base unit: %s.\" % (self.idPrefix, i)) #checks if", "and len(list(i)) != 1 and unitPreIndex not in 
(self.baseUnits or", "\"^\": numStart = index+1 numReps.append(''.join(list(i)[numStart:])) toAppend.append(''.join(list(i)[:index])) break #convert numReps into", "self.derivedUnits)): print(\"Your variable %s was not in the commonly used", "and unitPreIndex not in (self.baseUnits or self.derivedUnits) and len(unitPreIndex) !=", "def __init__(self): self.baseUnits = [\"m\", \"kg\", \"A\", \"s\", \"K\", \"mol\",", "\"mol\", \"cd\", \"sr\", \"rad\"] self.derivedUnits = [\"Hz\", \"N\", \"Pa\", \"J\",", "False): converted.append(i) elif(isSquared == True): toAppend = [] numReps =", "numReps into an int intReps = int(''.join(numReps)) #append number of", "\"W\", \"C\", \"V\", \"F\", \"ohm\", \"S\", \"Wb\", \"T\", \"H\", \"°C\",", "prefix %s and converted your unit to it's base unit:", "break #converts non-unary unit to base unit and checks for", "i = ''.join(orgNameList) print(\"The program removed the prefix %s and", "will add it to the product regardless.\" % i) converted.append(i)", "if val == \"^\": numStart = index+1 numReps.append(''.join(list(i)[numStart:])) toAppend.append(''.join(list(i)[:index])) break", "-- we will add it to the product regardless.\" %", "base unit break #Appends base unit if(i in (self.baseUnits or", "\"kat\"] def baseCheck(self, userList): '''Converts elements in str list to", "to the product regardless.\" % ''.join(toAppend)) converted.append(''.join(toAppend)) #Exception for special", "break #convert numReps into an int intReps = int(''.join(numReps)) #append", "str list to base units''' converted = [] for i", "int intReps = int(''.join(numReps)) #append number of units specified by", "for ind, j in enumerate(list(i)): if j == \"^\": isSquared", "val in enumerate(list(i)): if val == \"^\": numStart = index+1", "#append in case for special units break else: #append in", "it to the product regardless.\" % ''.join(toAppend)) converted.append(''.join(toAppend)) #Exception for", "in case for special units break else: #append in case", 
"(self.baseUnits and self.derivedUnits)): #append in case for special units break", "the prefix %s and converted your unit to it's base", "\"Wb\", \"T\", \"H\", \"°C\", \"lm\", \"lx\", \"Bq\", \"Gy\", \"Sv\", \"kat\"]", "\"<NAME>\" _purpose_ = \"Sets up the unit class\" class Unit:", "unit and checks for squared variables while(i not in (self.baseUnits", "case for special units break else: #append in case for", "if it has a carat in the expression for ind,", "% (self.idPrefix, i)) #checks if it is a special unit", "add it to the product regardless.\" % ''.join(toAppend)) converted.append(''.join(toAppend)) #Exception", "of lists''' def __init__(self): self.baseUnits = [\"m\", \"kg\", \"A\", \"s\",", "len(list(i)) != 1 and unitPreIndex not in (self.baseUnits or self.derivedUnits)", "[] for i in (userList): isSquared = False unitPreIndex =", "for i in (userList): isSquared = False unitPreIndex = \"\"", "l in range (intReps): if(''.join(toAppend) not in (self.baseUnits or self.derivedUnits)):", "True unitPreIndex = ''.join(list(i)[:ind]) break #converts non-unary unit to base", "and len(unitPreIndex) != 1): orgNameList = list(i) #identify prefix removed", "\"sr\", \"rad\"] self.derivedUnits = [\"Hz\", \"N\", \"Pa\", \"J\", \"W\", \"C\",", "squared variables while(i not in (self.baseUnits or self.derivedUnits) and len(list(i))", "it is a derived unit such as N, newtons --", "\"F\", \"ohm\", \"S\", \"Wb\", \"T\", \"H\", \"°C\", \"lm\", \"lx\", \"Bq\",", "unit to it's base unit: %s.\" % (self.idPrefix, i)) #checks", "\"lx\", \"Bq\", \"Gy\", \"Sv\", \"kat\"] def baseCheck(self, userList): '''Converts elements", "print(\"The program removed the prefix %s and converted your unit", "to get number of times the unit is squared for", "commonly used units OR it is a derived unit such", "for special units else: print(\"Your variable %s was not in", "or self.derivedUnits)): print(\"Your variable %s was not in the commonly", "= True unitPreIndex = ''.join(list(i)[:ind]) break 
#converts non-unary unit to", "False unitPreIndex = \"\" #checks if it has a carat", "for l in range (intReps): if(''.join(toAppend) not in (self.baseUnits or", "a class of lists''' def __init__(self): self.baseUnits = [\"m\", \"kg\",", "as N, newtons -- we will add it to the", "the expression for ind, j in enumerate(list(i)): if j ==", "def baseCheck(self, userList): '''Converts elements in str list to base", "\"Sets up the unit class\" class Unit: '''This is a", "\"s\", \"K\", \"mol\", \"cd\", \"sr\", \"rad\"] self.derivedUnits = [\"Hz\", \"N\",", "intReps = int(''.join(numReps)) #append number of units specified by the", "#!/usr/bin/python _author_ = \"<NAME>\" _purpose_ = \"Sets up the unit", "1 and unitPreIndex not in (self.baseUnits or self.derivedUnits) and len(unitPreIndex)", "\"lm\", \"lx\", \"Bq\", \"Gy\", \"Sv\", \"kat\"] def baseCheck(self, userList): '''Converts", "len(unitPreIndex) != 1): orgNameList = list(i) #identify prefix removed self.idPrefix", "#append in case for base unit break #Appends base unit", "== True): toAppend = [] numReps = [] #run once", "in (self.baseUnits or self.derivedUnits) and isSquared == False): converted.append(i) elif(isSquared", "toAppend = [] numReps = [] #run once to get", "= index+1 numReps.append(''.join(list(i)[numStart:])) toAppend.append(''.join(list(i)[:index])) break #convert numReps into an int", "is a derived unit such as N, newtons -- we", "prefix removed self.idPrefix = orgNameList.pop(0) i = ''.join(orgNameList) print(\"The program", "units OR it is a derived unit such as N,", "if(i in (self.baseUnits or self.derivedUnits) and isSquared == False): converted.append(i)", "in (self.baseUnits or self.derivedUnits) and len(list(i)) != 1 and unitPreIndex", "and checks for squared variables while(i not in (self.baseUnits or", "= False unitPreIndex = \"\" #checks if it has a", "(self.baseUnits or self.derivedUnits) and len(unitPreIndex) != 1): orgNameList = list(i)", "carat in the expression for ind, j in 
enumerate(list(i)): if", "numReps = [] #run once to get number of times", "\"H\", \"°C\", \"lm\", \"lx\", \"Bq\", \"Gy\", \"Sv\", \"kat\"] def baseCheck(self,", "elements in str list to base units''' converted = []", "unitPreIndex = ''.join(list(i)[:ind]) break #converts non-unary unit to base unit", "we will add it to the product regardless.\" % i)", "to base unit and checks for squared variables while(i not", "times the unit is squared for index, val in enumerate(list(i)):", "[\"Hz\", \"N\", \"Pa\", \"J\", \"W\", \"C\", \"V\", \"F\", \"ohm\", \"S\",", "#append number of units specified by the carat for l", "a derived unit such as N, newtons -- we will", "unit such as N, newtons -- we will add it", "== \"^\": isSquared = True unitPreIndex = ''.join(list(i)[:ind]) break #converts", "such as N, newtons -- we will add it to", "self.derivedUnits)): #append in case for special units break else: #append", "for index, val in enumerate(list(i)): if val == \"^\": numStart", "for special units break else: #append in case for base", "base units''' converted = [] for i in (userList): isSquared", "= ''.join(list(i)[:ind]) break #converts non-unary unit to base unit and", "it is a special unit if(i not in (self.baseUnits and", "= [] numReps = [] #run once to get number", "''.join(toAppend)) converted.append(''.join(toAppend)) #Exception for special units else: print(\"Your variable %s", "= orgNameList.pop(0) i = ''.join(orgNameList) print(\"The program removed the prefix", "variables while(i not in (self.baseUnits or self.derivedUnits) and len(list(i)) !=", "self.derivedUnits) and isSquared == False): converted.append(i) elif(isSquared == True): toAppend", "(self.idPrefix, i)) #checks if it is a special unit if(i", "N, newtons -- we will add it to the product", "special units break else: #append in case for base unit", "'''Converts elements in str list to base units''' converted =", "not in the commonly used units OR it is a", "if(''.join(toAppend) not in (self.baseUnits or 
self.derivedUnits)): print(\"Your variable %s was", "Unit: '''This is a class of lists''' def __init__(self): self.baseUnits", "baseCheck(self, userList): '''Converts elements in str list to base units'''", "to base units''' converted = [] for i in (userList):", "a carat in the expression for ind, j in enumerate(list(i)):", "isSquared = False unitPreIndex = \"\" #checks if it has", "get number of times the unit is squared for index,", "\"\" #checks if it has a carat in the expression", "!= 1): orgNameList = list(i) #identify prefix removed self.idPrefix =", "#convert numReps into an int intReps = int(''.join(numReps)) #append number", "is squared for index, val in enumerate(list(i)): if val ==", "expression for ind, j in enumerate(list(i)): if j == \"^\":", "#Exception for special units else: print(\"Your variable %s was not", "= ''.join(orgNameList) print(\"The program removed the prefix %s and converted", "(self.baseUnits or self.derivedUnits)): print(\"Your variable %s was not in the", "unit if(i in (self.baseUnits or self.derivedUnits) and isSquared == False):", "== \"^\": numStart = index+1 numReps.append(''.join(list(i)[numStart:])) toAppend.append(''.join(list(i)[:index])) break #convert numReps", "j in enumerate(list(i)): if j == \"^\": isSquared = True", "into an int intReps = int(''.join(numReps)) #append number of units", "it's base unit: %s.\" % (self.idPrefix, i)) #checks if it", "[\"m\", \"kg\", \"A\", \"s\", \"K\", \"mol\", \"cd\", \"sr\", \"rad\"] self.derivedUnits", "break #Appends base unit if(i in (self.baseUnits or self.derivedUnits) and", "int(''.join(numReps)) #append number of units specified by the carat for", "= \"\" #checks if it has a carat in the", "or self.derivedUnits) and isSquared == False): converted.append(i) elif(isSquared == True):", "numReps.append(''.join(list(i)[numStart:])) toAppend.append(''.join(list(i)[:index])) break #convert numReps into an int intReps =", "= [\"Hz\", \"N\", \"Pa\", \"J\", \"W\", \"C\", \"V\", \"F\", 
\"ohm\",", "\"C\", \"V\", \"F\", \"ohm\", \"S\", \"Wb\", \"T\", \"H\", \"°C\", \"lm\",", "while(i not in (self.baseUnits or self.derivedUnits) and len(list(i)) != 1", "the unit is squared for index, val in enumerate(list(i)): if", "unit class\" class Unit: '''This is a class of lists'''", "j == \"^\": isSquared = True unitPreIndex = ''.join(list(i)[:ind]) break", "and isSquared == False): converted.append(i) elif(isSquared == True): toAppend =", "\"Sv\", \"kat\"] def baseCheck(self, userList): '''Converts elements in str list", "\"J\", \"W\", \"C\", \"V\", \"F\", \"ohm\", \"S\", \"Wb\", \"T\", \"H\",", "self.baseUnits = [\"m\", \"kg\", \"A\", \"s\", \"K\", \"mol\", \"cd\", \"sr\",", "up the unit class\" class Unit: '''This is a class", "an int intReps = int(''.join(numReps)) #append number of units specified", "class of lists''' def __init__(self): self.baseUnits = [\"m\", \"kg\", \"A\",", "removed self.idPrefix = orgNameList.pop(0) i = ''.join(orgNameList) print(\"The program removed", "(intReps): if(''.join(toAppend) not in (self.baseUnits or self.derivedUnits)): print(\"Your variable %s", "in range (intReps): if(''.join(toAppend) not in (self.baseUnits or self.derivedUnits)): print(\"Your", "units''' converted = [] for i in (userList): isSquared =", "the carat for l in range (intReps): if(''.join(toAppend) not in", "elif(isSquared == True): toAppend = [] numReps = [] #run", "\"V\", \"F\", \"ohm\", \"S\", \"Wb\", \"T\", \"H\", \"°C\", \"lm\", \"lx\",", "self.derivedUnits) and len(list(i)) != 1 and unitPreIndex not in (self.baseUnits", "converted your unit to it's base unit: %s.\" % (self.idPrefix,", "list to base units''' converted = [] for i in", "converted = [] for i in (userList): isSquared = False", "the unit class\" class Unit: '''This is a class of", "units else: print(\"Your variable %s was not in the commonly", "in str list to base units''' converted = [] for", "\"kg\", \"A\", \"s\", \"K\", \"mol\", \"cd\", \"sr\", \"rad\"] self.derivedUnits =", "base 
unit and checks for squared variables while(i not in", "(userList): isSquared = False unitPreIndex = \"\" #checks if it", "i)) #checks if it is a special unit if(i not", "True): toAppend = [] numReps = [] #run once to", "\"N\", \"Pa\", \"J\", \"W\", \"C\", \"V\", \"F\", \"ohm\", \"S\", \"Wb\",", "units break else: #append in case for base unit break", "non-unary unit to base unit and checks for squared variables", "numStart = index+1 numReps.append(''.join(list(i)[numStart:])) toAppend.append(''.join(list(i)[:index])) break #convert numReps into an", "if(i not in (self.baseUnits and self.derivedUnits)): #append in case for", "#converts non-unary unit to base unit and checks for squared", "is a special unit if(i not in (self.baseUnits and self.derivedUnits)):", "unit break #Appends base unit if(i in (self.baseUnits or self.derivedUnits)", "#Appends base unit if(i in (self.baseUnits or self.derivedUnits) and isSquared", "regardless.\" % ''.join(toAppend)) converted.append(''.join(toAppend)) #Exception for special units else: print(\"Your", "= [] for i in (userList): isSquared = False unitPreIndex", "for base unit break #Appends base unit if(i in (self.baseUnits", "else: #append in case for base unit break #Appends base", "derived unit such as N, newtons -- we will add", "a special unit if(i not in (self.baseUnits and self.derivedUnits)): #append", "index+1 numReps.append(''.join(list(i)[numStart:])) toAppend.append(''.join(list(i)[:index])) break #convert numReps into an int intReps", "of units specified by the carat for l in range", "= [] #run once to get number of times the", "add it to the product regardless.\" % i) converted.append(i) return(converted)", "i in (userList): isSquared = False unitPreIndex = \"\" #checks", "\"Bq\", \"Gy\", \"Sv\", \"kat\"] def baseCheck(self, userList): '''Converts elements in", "is a class of lists''' def __init__(self): self.baseUnits = [\"m\",", "unitPreIndex = \"\" #checks if it has a carat in", "if it is a special unit if(i not 
in (self.baseUnits", "base unit: %s.\" % (self.idPrefix, i)) #checks if it is", "''.join(list(i)[:ind]) break #converts non-unary unit to base unit and checks", "class Unit: '''This is a class of lists''' def __init__(self):", "_author_ = \"<NAME>\" _purpose_ = \"Sets up the unit class\"", "in (self.baseUnits or self.derivedUnits)): print(\"Your variable %s was not in", "not in (self.baseUnits or self.derivedUnits) and len(unitPreIndex) != 1): orgNameList", "[] numReps = [] #run once to get number of", "= \"Sets up the unit class\" class Unit: '''This is", "has a carat in the expression for ind, j in", "product regardless.\" % ''.join(toAppend)) converted.append(''.join(toAppend)) #Exception for special units else:", "in enumerate(list(i)): if val == \"^\": numStart = index+1 numReps.append(''.join(list(i)[numStart:]))", "carat for l in range (intReps): if(''.join(toAppend) not in (self.baseUnits", "the commonly used units OR it is a derived unit", "in enumerate(list(i)): if j == \"^\": isSquared = True unitPreIndex", "self.idPrefix = orgNameList.pop(0) i = ''.join(orgNameList) print(\"The program removed the", "in (userList): isSquared = False unitPreIndex = \"\" #checks if", "by the carat for l in range (intReps): if(''.join(toAppend) not", "= \"<NAME>\" _purpose_ = \"Sets up the unit class\" class", "toAppend.append(''.join(list(i)[:index])) break #convert numReps into an int intReps = int(''.join(numReps))", "isSquared = True unitPreIndex = ''.join(list(i)[:ind]) break #converts non-unary unit", "will add it to the product regardless.\" % ''.join(toAppend)) converted.append(''.join(toAppend))", "[] #run once to get number of times the unit", "not in (self.baseUnits or self.derivedUnits)): print(\"Your variable %s was not", "\"K\", \"mol\", \"cd\", \"sr\", \"rad\"] self.derivedUnits = [\"Hz\", \"N\", \"Pa\",", "unit is squared for index, val in enumerate(list(i)): if val", "or self.derivedUnits) and len(unitPreIndex) != 1): orgNameList = list(i) #identify", 
"#checks if it is a special unit if(i not in", "<reponame>MatthewZheng/UnitsPlease #!/usr/bin/python _author_ = \"<NAME>\" _purpose_ = \"Sets up the", "special unit if(i not in (self.baseUnits and self.derivedUnits)): #append in", "\"rad\"] self.derivedUnits = [\"Hz\", \"N\", \"Pa\", \"J\", \"W\", \"C\", \"V\",", "\"Gy\", \"Sv\", \"kat\"] def baseCheck(self, userList): '''Converts elements in str", "removed the prefix %s and converted your unit to it's", "in the commonly used units OR it is a derived", "in case for base unit break #Appends base unit if(i", "squared for index, val in enumerate(list(i)): if val == \"^\":", "__init__(self): self.baseUnits = [\"m\", \"kg\", \"A\", \"s\", \"K\", \"mol\", \"cd\",", "\"°C\", \"lm\", \"lx\", \"Bq\", \"Gy\", \"Sv\", \"kat\"] def baseCheck(self, userList):" ]
[ "\"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\" stock = \"000002\" api_type = 'cp' print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40)", "-*- coding: utf-8 -*- \"\"\" Created on Wed Feb 26", "the use of {2}-{1} programming and {0}\" .format(\"programmer\", \"Open\", \"Source\",", "of {2}-{1} programming and {0}\" .format(\"programmer\", \"Open\", \"Source\", \"Operating Systems\"))", "stock = \"00002\" print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236')) print(\"Every", "= \"00002\" print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236')) print(\"Every {}", "URL with string.format \"\"\" base_url = \"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\" stock = \"000002\"", "and {}\" .format(\"programmer\", \"Open\", \"Source\", \"Operating Systems\")) print(\"Every {3} should", "print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236')) print(\"Every {} should know the use of", "know the use of {}-{} programming and {}\" .format(\"programmer\", \"Open\",", "{}\" .format(\"programmer\", \"Open\", \"Source\", \"Operating Systems\")) print(\"Every {3} should know", "base_url = \"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\" stock = \"000002\" api_type = 'cp' print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type)", "2020 @author: <NAME> Try to construct URL with string.format \"\"\"", "print(base_url.format(stock,api_type)) print('='*40) stock = \"00002\" print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) 
print('='*40) print('{:>6}'.format('236'))", "string.format \"\"\" base_url = \"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\" stock = \"000002\" api_type =", "construct URL with string.format \"\"\" base_url = \"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\" stock =", "= \"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\" stock = \"000002\" api_type = 'cp' print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type))", "the use of {}-{} programming and {}\" .format(\"programmer\", \"Open\", \"Source\",", ".format(\"programmer\", \"Open\", \"Source\", \"Operating Systems\")) print(\"Every {3} should know the", "use of {2}-{1} programming and {0}\" .format(\"programmer\", \"Open\", \"Source\", \"Operating", "26 22:23:07 2020 @author: <NAME> Try to construct URL with", "'cp' print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) stock = \"00002\" print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40)", "@author: <NAME> Try to construct URL with string.format \"\"\" base_url", "\"\"\" Created on Wed Feb 26 22:23:07 2020 @author: <NAME>", "on Wed Feb 26 22:23:07 2020 @author: <NAME> Try to", "= \"000002\" api_type = 'cp' print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) stock =", "utf-8 -*- \"\"\" Created on Wed Feb 26 22:23:07 2020", "should know the use of {2}-{1} programming and {0}\" .format(\"programmer\",", "print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236')) print(\"Every {} should know", "use of {}-{} programming and {}\" .format(\"programmer\", \"Open\", 
\"Source\", \"Operating", "print(\"Every {3} should know the use of {2}-{1} programming and", "\"Open\", \"Source\", \"Operating Systems\")) print(\"Every {3} should know the use", "{} should know the use of {}-{} programming and {}\"", "{}-{} programming and {}\" .format(\"programmer\", \"Open\", \"Source\", \"Operating Systems\")) print(\"Every", "to construct URL with string.format \"\"\" base_url = \"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\" stock", "print('='*40) stock = \"00002\" print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236'))", "print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) stock = \"00002\" print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) print('='*40)", "Wed Feb 26 22:23:07 2020 @author: <NAME> Try to construct", "stock = \"000002\" api_type = 'cp' print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) stock", "should know the use of {}-{} programming and {}\" .format(\"programmer\",", "know the use of {2}-{1} programming and {0}\" .format(\"programmer\", \"Open\",", "print(\"Every {} should know the use of {}-{} programming and", "programming and {}\" .format(\"programmer\", \"Open\", \"Source\", \"Operating Systems\")) print(\"Every {3}", "\"Source\", \"Operating Systems\")) print(\"Every {3} should know the use of", "\"\"\" base_url = \"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\" stock = \"000002\" api_type = 'cp'", "print('='*40) print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236')) print(\"Every {} should know the use", "print('{:>6}'.format('236')) print('{:>06}'.format('236')) 
print(\"Every {} should know the use of {}-{}", "print('{:>06}'.format('236')) print(\"Every {} should know the use of {}-{} programming", "Try to construct URL with string.format \"\"\" base_url = \"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\"", "Created on Wed Feb 26 22:23:07 2020 @author: <NAME> Try", "\"Operating Systems\")) print(\"Every {3} should know the use of {2}-{1}", "with string.format \"\"\" base_url = \"http://quotes.money.163.com/service/gszl_{:>06}.html?type={}\" stock = \"000002\" api_type", "Feb 26 22:23:07 2020 @author: <NAME> Try to construct URL", "\"00002\" print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236')) print(\"Every {} should", "= 'cp' print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) stock = \"00002\" print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type))", "Systems\")) print(\"Every {3} should know the use of {2}-{1} programming", "{3} should know the use of {2}-{1} programming and {0}\"", "22:23:07 2020 @author: <NAME> Try to construct URL with string.format", "of {}-{} programming and {}\" .format(\"programmer\", \"Open\", \"Source\", \"Operating Systems\"))", "-*- \"\"\" Created on Wed Feb 26 22:23:07 2020 @author:", "coding: utf-8 -*- \"\"\" Created on Wed Feb 26 22:23:07", "\"000002\" api_type = 'cp' print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) stock = \"00002\"", "<NAME> Try to construct URL with string.format \"\"\" base_url =", "# -*- coding: utf-8 -*- \"\"\" Created on Wed Feb", "print(base_url.format(stock,api_type)) print('='*40) print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236')) 
print(\"Every {} should know the", "api_type = 'cp' print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type) print(base_url.format(stock,api_type)) print('='*40) stock = \"00002\" print(\"http://quotes.money.163.com/service/gszl_\"+stock+\".html?type=\"+api_type)" ]
[ "<gh_stars>1000+ from conans.server.launcher import ServerLauncher from conans.util.env_reader import get_env launcher", "conans.server.launcher import ServerLauncher from conans.util.env_reader import get_env launcher = ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\"))", "get_env launcher = ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\")) app = launcher.server.root_app def main(*args): launcher.launch()", "ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\")) app = launcher.server.root_app def main(*args): launcher.launch() if __name__ ==", "launcher = ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\")) app = launcher.server.root_app def main(*args): launcher.launch() if", "from conans.util.env_reader import get_env launcher = ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\")) app = launcher.server.root_app", "= launcher.server.root_app def main(*args): launcher.launch() if __name__ == \"__main__\": main()", "ServerLauncher from conans.util.env_reader import get_env launcher = ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\")) app =", "from conans.server.launcher import ServerLauncher from conans.util.env_reader import get_env launcher =", "conans.util.env_reader import get_env launcher = ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\")) app = launcher.server.root_app def", "app = launcher.server.root_app def main(*args): launcher.launch() if __name__ == \"__main__\":", "import get_env launcher = ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\")) app = launcher.server.root_app def main(*args):", "= ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\")) app = launcher.server.root_app def main(*args): launcher.launch() if __name__", "import ServerLauncher from conans.util.env_reader import get_env launcher = ServerLauncher(server_dir=get_env(\"CONAN_SERVER_HOME\")) app" ]
[ "video. :vartype token: str \"\"\" _validation = { 'expiration_date': {'readonly':", "cameras, as long as the same processing is to be", "content. Default is 'false'. If set to 'true', then \"disableArchive\"", "self.issuers = kwargs.get('issuers', None) self.audiences = kwargs.get('audiences', None) self.claims =", "_attribute_map = { 'title': {'key': 'title', 'type': 'str'}, 'description': {'key':", "list[~video_analyzer.models.NodeInput] :param preset: Required. The encoder preset, which defines the", "account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT", "): super(AudioEncoderBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.bitrate_kbps", "self.retention_period = kwargs.get('retention_period', None) class VideoEncoderBase(msrest.serialization.Model): \"\"\"Base type for all", "stored or exported. :type sinks: list[~video_analyzer.models.SinkNodeBase] \"\"\" _validation = {", "'type': 'str'}, } def __init__( self, **kwargs ): super(VideoScale, self).__init__(**kwargs)", "status: The status of the pipeline job operation. :vartype status:", "or transformed. :type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of the", ":type description: str :param actions_required: A message indicating if changes", "the resource name is available. :type name_available: bool :param reason:", "= kwargs.get('kind', None) self.sku = kwargs.get('sku', None) self.description = kwargs.get('description',", "_validation = { 'can_stream': {'required': True}, 'has_data': {'required': True}, 'is_in_use':", "resource. 
:type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public", "'tier': {'key': 'tier', 'type': 'str'}, } def __init__( self, **kwargs", "type: str self.alg = kwargs['alg'] self.x = kwargs['x'] self.y =", "identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the Key", "'message', 'type': 'str'}, 'target': {'key': 'target', 'type': 'str'}, 'details': {'key':", "publishing of content for a unique RTSP camera. Variables are", "'password': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "either be versioned (for example https://vault/keys/mykey/version1) or reference a key", "\"ClientApi\". :type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType \"\"\" _validation = {", "of 48 kHz). Allowed values are 96, 112, 128, 160,", "If archiving is enabled, this results in a video of", "be populated in order to send to Azure. :param name:", ":param supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types: list[str] :ivar dimensions:", "request. :ivar type: The additional info type. :vartype type: str", "\"\"\"A collection of PipelineTopology items. :param value: A collection of", "endpoint: Required. RTSP endpoint information for Video Analyzer to connect", "and it is only used for the initial handshake between", "sources. You probably want to use the sub-classes and not", "order to send to Azure. :param name: Required. The operation", "\"RS512\". :type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo :param n: Required. RSA", "within Azure Video Analyzer. Videos can be ingested from RTSP", "token to download the most recent still image from the", "preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls \"\"\" _attribute_map = { 'download_url': {'key': 'downloadUrl', 'type':", "class NetworkAccessControl(msrest.serialization.Model): \"\"\"Network access control for video analyzer account. 
:param", "VideoAnalyzerCollection(msrest.serialization.Model): \"\"\"A collection of VideoAnalyzer items. :param value: A collection", "the cloud account. The provisioning token itself is short lived", "str or ~video_analyzer.models.SkuTier \"\"\" _validation = { 'name': {'required': True},", "\"\"\"The video scaling information. :param height: The desired output video", "value: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _attribute_map = { 'value': {'key': 'value', 'type':", "True}, 'edge_module_id': {'readonly': True}, } _attribute_map = { 'id': {'key':", "code: The error code. :vartype code: str :ivar message: The", "pipeline topology defined for real-time content processing. When activated, this", "private link resource Private link DNS zone name. :type required_zone_names:", "Analyzer player widget. Alternatively, this URL can be used in", "for derived types.Constant filled by server. :type type: str :param", "# type: Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase): \"\"\"A remote tunnel securely established", "date. :vartype expiration_date: ~datetime.datetime :ivar token: The token blob to", "is performed. :type resource: str :param operation: The operation type.", ":param role: Defines the access level granted by this policy.", "e: Required. RSA public key exponent. 
:type e: str \"\"\"", "'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'n': {'key': 'n',", "{ 'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'}, 'metric_specifications': {'key': 'metricSpecifications', 'type':", "str :param last_modified_by_type: The type of identity that last modified", "{'required': True}, 'time_sequences': {'required': True}, } _attribute_map = { 'type':", "content can be reused across many different cameras, as long", "type: str class AuthenticationBase(msrest.serialization.Model): \"\"\"Base class for access policies authentication", ":type video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param video_publishing_options: Options to change how the", "True}, 'target': {'readonly': True}, 'details': {'readonly': True}, 'additional_info': {'readonly': True},", "to be stored as a file, and published via a", "order to send to Azure. :param id: Required. The ID", "super(PipelineJobError, self).__init__(**kwargs) self.code = kwargs.get('code', None) self.message = kwargs.get('message', None)", "'name': {'required': True}, 'value': {'required': True}, } _attribute_map = {", "archive playback latency. Value must be specified in ISO8601 duration", "be created by exporting sequences from existing captured video through", "Sinks: list of one or more data sinks which allow", "= kwargs['type'] self.user_assigned_identities = kwargs.get('user_assigned_identities', None) class VideoAnalyzerOperationStatus(msrest.serialization.Model): \"\"\"Status of", "account. 
The provisioning token itself is short lived and it", "'[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value", "__init__( self, **kwargs ): super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity = kwargs['user_assigned_identity'] class", "self).__init__(**kwargs) self.type = None # type: Optional[str] class TokenKey(msrest.serialization.Model): \"\"\"Key", "validate access tokens. Having multiple keys allow for seamless key", ":type ignore_signature: str \"\"\" _attribute_map = { 'ignore_hostname': {'key': 'ignoreHostname',", "for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar", "been Approved/Rejected/Removed by the owner of the service. Possible values", "self.ignore_signature = kwargs.get('ignore_signature', None) class TokenClaim(msrest.serialization.Model): \"\"\"Properties for expected token", "according to the pipeline topology definition. :type topology_name: str :param", "\"User\", \"Application\", \"ManagedIdentity\", \"Key\". :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType :param", "type: str :ivar info: The additional info. 
:vartype info: any", "self.bitrate_kbps = kwargs.get('bitrate_kbps', None) class AudioEncoderAac(AudioEncoderBase): \"\"\"A custom preset for", "= '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str self.iot_hub_name = kwargs['iot_hub_name'] self.device_id =", "for tracking the status of an operation on the pipeline", "or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when this pipeline", "str or ~video_analyzer.models.CreatedByType :param last_modified_at: The timestamp of resource last", "by the server, and will be ignored when sending a", "'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(PipelineJobUpdate,", "'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityResponse,", "{'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, }", "source allows for media from an RTSP camera or generic", "list[str] \"\"\" _validation = { 'name': {'readonly': True}, 'display_name': {'readonly':", "are: VideoSink. All required parameters must be populated in order", ":param ignore_signature: When set to 'true' causes the certificate chain", "'@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } _subtype_map", "super(ErrorResponse, self).__init__(**kwargs) self.error = kwargs.get('error', None) class GroupLevelAccessControl(msrest.serialization.Model): \"\"\"Group level", "instructions on how the input content should be processed. :type", "{'required': True}, 'has_data': {'required': True}, 'is_in_use': {'required': True}, } _attribute_map", "the pipeline topology definition. :type topology_name: str :param description: An", ":vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name': {'readonly': True},", "{'key': 'blobDuration', 'type': 'str'}, } def __init__( self, **kwargs ):", "of the registration token. 
The Azure Video Analyzer IoT edge", "nodes allow pipeline data to be stored or exported. :type", "sampling rate of 48 kHz). Allowed values are 96, 112,", "servers. :type endpoint: ~video_analyzer.models.EndpointBase \"\"\" _validation = { 'type': {'required':", "the Video Analyzer resource. Variables are only populated by the", "self.inputs = kwargs['inputs'] class Sku(msrest.serialization.Model): \"\"\"The SKU details. Variables are", "RTSP endpoints and credentials. Overall a topology is composed of", "class PipelineJobUpdate(ProxyResource): \"\"\"Pipeline job represents a unique instance of a", "**kwargs ): super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity = kwargs['user_assigned_identity'] class RsaTokenKey(TokenKey): \"\"\"Required", "be ingested by the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param processors:", "request. All required parameters must be populated in order to", "in case a new video resource needs to be created", "not specify a value. :type default: str \"\"\" _validation =", "control for video analyzer account. :param integration: Public network access", "All required parameters must be populated in order to send", "= { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required':", "used for the initial handshake between IoT edge module and", "A link to the next page of the collection (when", "'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox',", "{'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, 'trusted_certificates':", "service provider. :type provider: str :param resource: Resource on which", "= { 'value': {'key': 'value', 'type': '[VideoAnalyzer]'}, } def __init__(", "Required. Node name. Must be unique within the topology. 
:type", "of user-defined parameters, which allow for a topology to be", "if downstream of RTSP source, and if disableArchive is set", "str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The reason for approval/rejection of", "'str'}, } def __init__( self, **kwargs ): super(UserAssignedManagedIdentity, self).__init__(**kwargs) self.client_id", "'title', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'segment_length': {'key':", "be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput]", "\"\"\" _attribute_map = { 'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'}, }", ":param ignore_hostname: When set to 'true' causes the certificate subject", "{ 'type': {'required': True}, 'certificates': {'required': True}, } _attribute_map =", "types.Constant filled by server. :type type: str :param certificates: Required.", "'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available", "the video resource can lead to errors when uploading content", "'name': {'key': 'name', 'type': 'str'}, 'video_name': {'key': 'videoName', 'type': 'str'},", ":param parameters: List of the topology parameter declarations. Parameters declared", "properties based on the current video state. :vartype flags: ~video_analyzer.models.VideoFlags", "30 seconds) and can vary between 30 seconds to 5", "code: str :param message: The error message. :type message: str", "): super(SinkNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str self.inputs", "'[Operation]'}, } def __init__( self, **kwargs ): super(OperationCollection, self).__init__(**kwargs) self.value", "the use of user-defined parameters, which allow for a topology", "PEM formatted public certificates. One certificate per entry. 
:type certificates:", "\"\"\" _attribute_map = { 'title': {'key': 'title', 'type': 'str'}, 'description':", "stored as a file, and published via a video resource", "key rotation of the token signing key. Token signature must", "def __init__( self, **kwargs ): super(VideoAnalyzerUpdate, self).__init__(**kwargs) self.tags = kwargs.get('tags',", "respective values for it to be valid. :type claims: list[~video_analyzer.models.TokenClaim]", "= kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class PrivateEndpoint(msrest.serialization.Model): \"\"\"The", "{'key': 'value', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ):", "'type': 'int'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'parameters': {'key': 'properties.parameters',", "values include: \"String\", \"SecretString\", \"Int\", \"Double\", \"Bool\". :type type: str", "The private link resource Private link DNS zone name. :type", "can optionally have default values to be used when they", "desired retention period will be effective within 24 hours. :type", "this results in a video of type 'archive'. If used", "response format.). :param error: The error object. :type error: ~video_analyzer.models.ErrorDetail", ":param video_name: Required. Name of the Video Analyzer video resource", "= { 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly':", ":vartype status: str :ivar error: The error details for the", "the pipeline job operation. :vartype status: str :ivar error: The", "{'key': 'lastModifiedByType', 'type': 'str'}, 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, }", "will be effective within 24 hours. :type retention_period: str \"\"\"", "Microsoft.ClassicStorage or Microsoft.Storage). :type id: str :param identity: A managed", "be up to 2048 characters long. :type description: str :ivar", "response). 
:type next_link: str \"\"\" _attribute_map = { 'value': {'key':", "also follows the OData error response format.). :param error: The", "'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, 'description': {'key':", "\"\"\"Metadata pertaining to creation and last modification of the resource.", "connection between service consumer and provider. :param status: Indicates whether", "in ISO8601 duration format (i.e. \"PT30S\" equals 30 seconds) and", "'height', 'type': 'str'}, 'width': {'key': 'width', 'type': 'str'}, 'mode': {'key':", "'type': 'str'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status',", "the live pipeline operation. :vartype name: str :ivar status: The", "\"\"\"A metric dimension. Variables are only populated by the server,", "self).__init__(**kwargs) self.node_name = kwargs['node_name'] class Operation(msrest.serialization.Model): \"\"\"An operation. All required", "account. :param value: Array of private endpoint connections. :type value:", "str \"\"\" _attribute_map = { 'retention_period': {'key': 'retentionPeriod', 'type': 'str'},", "str class VideoEntity(ProxyResource): \"\"\"Represents a video resource within Azure Video", "HTTP connections, and the RTP packages are interleaved in the", "None self.error = None class LivePipelineUpdate(ProxyResource): \"\"\"Live pipeline represents a", "'str'}, } def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider", "): super(SystemData, self).__init__(**kwargs) self.created_by = kwargs.get('created_by', None) self.created_by_type = kwargs.get('created_by_type',", ":vartype required_members: list[str] :param required_zone_names: The private link resource Private", "'SystemData'}, 'kind': {'key': 'kind', 'type': 'str'}, 'sku': {'key': 'sku', 'type':", "'name': {'key': 'name', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'},", "None) class NodeBase(msrest.serialization.Model): \"\"\"Base class for nodes. 
You probably want", "at the time. :type is_in_use: bool \"\"\" _validation = {", "of the Video Analyzer video resource to be used as", "self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.state =", "sending a request. All required parameters must be populated in", "kwargs['n'] self.e = kwargs['e'] class SourceNodeBase(NodeBase): \"\"\"Base class for topology", "'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs", "True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'},", "Token must contains all claims and respective values for it", "identity used by the Video Analyzer resource. Variables are only", "represents a unique instance of a live topology, used for", "include: \"Succeeded\", \"Creating\", \"Deleting\", \"Failed\". :vartype provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState", "resource. This property is only allowed for topologies where \"kind\"", "role: str or ~video_analyzer.models.AccessPolicyRole :param authentication: Authentication method to be", "name_available: Indicates if the resource name is available. :type name_available:", "request. :ivar name: The diagnostic log category name. :vartype name:", "about the error, in case the pipeline job fails. :vartype", "n: Required. RSA public key modulus. :type n: str :param", "'type': 'str'}, } def __init__( self, **kwargs ): super(VideoArchival, self).__init__(**kwargs)", "\"\"\"Common fields that are returned in the response for all", "archives the content can be reused across many different cameras,", "{ 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly': True},", "for a particular outcome. The topology should be defined according", "the operation. 
:type origin: str :param properties: Operation properties format.", "self.state = None self.parameters = kwargs.get('parameters', None) class LivePipelineCollection(msrest.serialization.Model): \"\"\"A", "exceeds this capacity, then the service will disconnect temporarily from", "} _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'key_vault_properties':", "the Internet prior to the token expiration date. :vartype expiration_date:", "properties: ~video_analyzer.models.Properties :param is_data_action: Whether the operation applies to data-plane.", "For example, video recording may be gated on events or", "kwargs['name'] self.value = kwargs['value'] class TrackedResource(Resource): \"\"\"The resource model definition", "__init__( self, **kwargs ): super(VideoPreviewImageUrls, self).__init__(**kwargs) self.small = kwargs.get('small', None)", "be parameterized. This allows individual pipelines refer to different values,", "list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption", "at least one of the given values. :type issuers: list[str]", "this node. :type inputs: list[~video_analyzer.models.NodeInput] :param preset: Required. The encoder", ":ivar display_description: The metric display description. :vartype display_description: str :ivar", "'supported_time_grain_types': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name',", "'x': {'key': 'x', 'type': 'str'}, 'y': {'key': 'y', 'type': 'str'},", "= { 'tags': {'key': 'tags', 'type': '{str}'}, 'identity': {'key': 'identity',", "of the encoded video. If omitted, the encoder uses the", "of the user assigned managed identity used by the Video", "'type': 'str'}, 'type_properties_type': {'key': 'properties.type', 'type': 'str'}, 'flags': {'key': 'properties.flags',", "the given values. 
:type audiences: list[str] :param claims: List of", "value: str \"\"\" _validation = { 'name': {'required': True}, 'value':", "and authorized to the cloud account. The provisioning token itself", "): super(TokenClaim, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs['value'] class", "self).__init__(**kwargs) self.status = kwargs.get('status', None) self.description = kwargs.get('description', None) self.actions_required", "kwargs['name'] class Endpoint(msrest.serialization.Model): \"\"\"The endpoint details. All required parameters must", "access control for Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state:", "# type: str self.inputs = kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase): \"\"\"Encoder processor", "Expected value of the claim to be present on the", "{'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'}, 'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'}, }", "pipeline. Variables are only populated by the server, and will", ":vartype current_key_identifier: str \"\"\" _validation = { 'key_identifier': {'required': True},", "collection of LivePipeline items. :param value: A collection of LivePipeline", "pipeline does not specify a value. :type default: str \"\"\"", "error: ~video_analyzer.models.ErrorDetail \"\"\" _attribute_map = { 'error': {'key': 'error', 'type':", "'type': 'bool'}, } def __init__( self, **kwargs ): super(VideoFlags, self).__init__(**kwargs)", "= None self.error = None class PipelineJobUpdate(ProxyResource): \"\"\"Pipeline job represents", "of an specific pipeline topology parameter. 
See pipeline topology parameters", "actions and its dynamic properties based on the current video", "'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type':", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'}", "'str'}, 'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'}, } def __init__( self,", "\"Invalid\", \"AlreadyExists\". :type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed", "'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'role': {'key': 'properties.role', 'type': 'str'},", "and respective values for it to be valid. :type claims:", "None self.token = None class VideoContentUrls(msrest.serialization.Model): \"\"\"Set of URLs to", "True}, 'status': {'readonly': True}, } _attribute_map = { 'id': {'key':", "will not be archived or recorded. This is used, for", "True}, 'tier': {'readonly': True}, } _attribute_map = { 'name': {'key':", "{'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type':", "resource. :param created_by: The identity that created the resource. :type", "message. :vartype message: str :ivar target: The error target. :vartype", ":param value: A collection of EdgeModuleEntity items. :type value: list[~video_analyzer.models.EdgeModuleEntity]", "to be present on the token. 
:type value: str \"\"\"", "def __init__( self, **kwargs ): super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList'", "kwargs.get('next_link', None) class PipelineJobError(msrest.serialization.Model): \"\"\"Details about the error for a", "} def __init__( self, **kwargs ): super(VideoAnalyzerUpdate, self).__init__(**kwargs) self.tags =", "video_name: str :param video_creation_properties: Optional video properties to be used", "{'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'} } def __init__( self, **kwargs ):", "can connect to over TLS transport (data is encrypted in", "the parameter value of an specific pipeline topology parameter. See", "1280x720. All required parameters must be populated in order to", "class KeyVaultProperties(msrest.serialization.Model): \"\"\"The details for accessing the encryption keys in", "Operation start time. :type start_time: str :param end_time: Operation end", "authorized to the cloud account. The provisioning token itself is", "self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class VideoFlags(msrest.serialization.Model):", "'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},", "values include: \"Enabled\", \"Disabled\". 
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess \"\"\"", "'str'}, } def __init__( self, **kwargs ): super(IotHub, self).__init__(**kwargs) self.id", "{'key': 'actionType', 'type': 'str'}, } def __init__( self, **kwargs ):", "= kwargs.get('bitrate_kbps', None) class AudioEncoderAac(AudioEncoderBase): \"\"\"A custom preset for encoding", "'type': 'str'}, } def __init__( self, **kwargs ): super(NodeInput, self).__init__(**kwargs)", "'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, } _subtype_map = {", "of the given values. :type audiences: list[str] :param claims: List", "'str'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint", "video is currently being referenced be an active pipeline. The", "id present on the JWT token header. :type kid: str", "__init__( self, **kwargs ): super(VideoSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSource' #", "**kwargs ): super(PipelineJobCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link =", "parameters without a default value must be defined. Topology parameters", "be populated in order to send to Azure. :param can_stream:", "changes on the service provider require any updates on the", "'password': {'key': 'password', 'type': 'str'}, } def __init__( self, **kwargs", "the granularity of days, up to a maximum of 10", "edge_module_id: Internal ID generated for the instance of the Video", "The error target. :vartype target: str :ivar details: The error", "camera or generic RTSP server to be ingested into a", "ServiceSpecification(msrest.serialization.Model): \"\"\"The service metric specifications. Variables are only populated by", "value of an specific pipeline topology parameter. See pipeline topology", "integration: Public network access for integration group. :type integration: ~video_analyzer.models.GroupLevelAccessControl", "id: Operation resource ID. 
:type id: str :param start_time: Operation", "{'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__( self, **kwargs ): super(CredentialsBase, self).__init__(**kwargs)", "'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},", "'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},", ":type trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options: Validation options to use when", "'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},", "video resource. If archiving is enabled, this results in a", "encrypt the account. The key may either be versioned (for", "to access the storage account. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status:", "type is 'archive' and video archiving is enabled. :type archive_base_url:", "collection of Operation items. :param value: A collection of Operation", "when sending a request. :ivar name: The name of the", "display: The operation display name. :type display: ~video_analyzer.models.OperationDisplay :param origin:", "'properties.endpoints', 'type': '[Endpoint]'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs': {'key':", "'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'n': {'key': 'n', 'type':", "'info': {'key': 'info', 'type': 'object'}, } def __init__( self, **kwargs", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} }", "'type_properties_type': {'readonly': True}, 'flags': {'readonly': True}, 'content_urls': {'readonly': True}, }", "via a video resource. 
If archiving is enabled, this results", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key':", "'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type':", "specified in ISO8601 duration format (i.e. \"P1D\" equals 1 day)", "for encoding of the input content. For example, it can", "= kwargs['certificates'] class PipelineJob(ProxyResource): \"\"\"Pipeline job represents a unique instance", "} def __init__( self, **kwargs ): super(TrackedResource, self).__init__(**kwargs) self.tags =", "endpoints and credentials. Overall a topology is composed of the", "**kwargs ): super(StorageAccount, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs.get('identity',", ":param created_at: The timestamp of resource creation (UTC). :type created_at:", "token signing key. Token signature must match exactly one key.", ":param processors: List of the topology processor nodes. Processor nodes", "instructions on how the input video should be processed. You", "{'key': 'inputs', 'type': '[NodeInput]'}, 'preset': {'key': 'preset', 'type': 'EncoderPresetBase'}, }", "issuers: list[str] :param audiences: List of expected token audiences. Token", "as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] \"\"\" _validation", "on the token. :type value: str \"\"\" _validation = {", "Possible values include: \"Invalid\", \"AlreadyExists\". :type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason", "'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},", "be lost if the code is regenerated. # -------------------------------------------------------------------------- from", "be ignored when sending a request. :ivar id: The ARM", "which audio should be encoded (2-channel stereo audio at a", "transport (no encryption in transit). 
All required parameters must be", "'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def", "\"\"\"Optional flags used to change how video is published. These", "The display name for the dimension. :vartype display_name: str :ivar", "~video_analyzer.models.EncoderSystemPresetType \"\"\" _validation = { 'type': {'required': True}, 'name': {'required':", "for tokens generated with RSA algorithm. All required parameters must", "can lead to errors when uploading content to the archive.", "'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineJobError, self).__init__(**kwargs)", "str \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[PipelineJob]'},", "None) self.status = kwargs.get('status', None) self.error = kwargs.get('error', None) class", "'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } def __init__( self, **kwargs", "self.type = kwargs['type'] class EndpointBase(msrest.serialization.Model): \"\"\"Base class for endpoints. You", "be downloaded as MP4 files. Variables are only populated by", "from 500 to 3000 Kbps in increments of 100 Kbps.", "video should be processed. You probably want to use the", "UserAssignedManagedIdentity(msrest.serialization.Model): \"\"\"The details of the user assigned managed identity used", "This value can be updated at any time and the", "log emitted by service. Variables are only populated by the", "): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status = kwargs.get('status', None) self.description = kwargs.get('description',", "on the consumer. 
:type actions_required: str \"\"\" _attribute_map = {", "expiration: The date-time by when this pipeline job will be", ":vartype system_data: ~video_analyzer.models.SystemData :param private_endpoint: The resource of private end", "{ 'type': {'required': True}, 'name': {'required': True}, 'endpoint': {'required': True},", "{'required': True}, 'kid': {'required': True}, 'alg': {'required': True}, 'x': {'required':", "super(Operation, self).__init__(**kwargs) self.name = kwargs['name'] self.display = kwargs.get('display', None) self.origin", "sink publishes content via the video resource. This property is", "): super(EncoderPresetBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class", "**kwargs ): super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link =", "Value must be specified in ISO8601 duration format (i.e. \"PT30S\"", ":param sinks: List of the topology sink nodes. Sink nodes", ":ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str self.alg = kwargs['alg']", "'properties.description', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key':", "str \"\"\" _attribute_map = { 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'},", "self.description = kwargs.get('description', None) self.type_properties_type = None self.flags = None", "Bitrate, in kilobits per second or Kbps, at which audio", "'type': 'SystemData'}, 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState',", "when sending a request. :ivar name: The metric name. :vartype", ":param identity: A managed identity that Video Analyzer will use", "class for credential objects. 
You probably want to use the", "**kwargs ): super(IotHub, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs['identity']", "or not public network access is allowed for resources under", "_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key':", "within these ranges. :type time_sequences: ~video_analyzer.models.TimeSequenceBase \"\"\" _validation = {", "presented as part of the credentials. It is recommended that", "self, **kwargs ): super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint = kwargs.get('private_endpoint', None) self.private_link_service_connection_state", "{'key': 'retentionPeriod', 'type': 'str'}, } def __init__( self, **kwargs ):", "self.type = None # type: Optional[str] self.name = kwargs['name'] class", "'display_name': {'key': 'displayName', 'type': 'str'}, 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},", "token to download the video MP4 file. The resulting MP4", "Manager proxy resource. It will not have tags and a", "super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname', None) self.ignore_signature = kwargs.get('ignore_signature', None)", "name validation to be skipped. Default is 'false'. :type ignore_hostname:", "= kwargs.get('value', None) class OperationDisplay(msrest.serialization.Model): \"\"\"Operation details. :param provider: The", "Key Vault mapping. :vartype status: str \"\"\" _validation = {", "Manager resources. Variables are only populated by the server, and", "class PipelineTopologyCollection(msrest.serialization.Model): \"\"\"A collection of PipelineTopology items. :param value: A", "to Azure. :param can_stream: Required. 
Value indicating whether or not", "{'key': 'expirationDate', 'type': 'iso-8601'}, 'token': {'key': 'token', 'type': 'str'}, }", "None self.details = None self.additional_info = None class ErrorResponse(msrest.serialization.Model): \"\"\"Common", "they are not defined in the pipelines. All required parameters", "= kwargs['name'] self.id = kwargs.get('id', None) self.start_time = kwargs.get('start_time', None)", "__init__( self, **kwargs ): super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "= None self.private_endpoint_connections = None class VideoArchival(msrest.serialization.Model): \"\"\"Video archival properties.", "= None self.display_description = None self.unit = None self.aggregation_type =", "the pipeline job. Variables are only populated by the server,", "encoder sets it automatically to try and match the quality", "'tags', 'type': '{str}'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key':", "is encrypted in transit). All required parameters must be populated", "\"\"\" _validation = { 'user_assigned_identity': {'required': True}, } _attribute_map =", "UnsecuredEndpoint. All required parameters must be populated in order to", ":type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control", "'type': 'str'}, } def __init__( self, **kwargs ): super(Operation, self).__init__(**kwargs)", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData \"\"\" _validation = {", "live pipeline will process content according to the pipeline topology", ":param tags: A set of tags. Resource tags. :type tags:", "of the built-in encoding preset. Possible values include: \"SingleLayer_540p_H264_AAC\", \"SingleLayer_720p_H264_AAC\",", "must be referenced throughout the topology and can optionally have", "name. 
:type required_zone_names: list[str] \"\"\" _validation = { 'id': {'readonly':", "capture and publish content. Note: if downstream of RTSP source,", "credentials. All required parameters must be populated in order to", "be populated in order to send to Azure. :param id:", "self, **kwargs ): super(PipelineJobError, self).__init__(**kwargs) self.code = kwargs.get('code', None) self.message", "class directly. Known sub-classes are: VideoEncoderH264. All required parameters must", "to Azure. :param key_identifier: Required. The URL of the Key", "'endpoint_url': {'key': 'endpointUrl', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'},", "IoT Hub identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'}", "is recommended that the expected use of the topology to", "'str'}, } def __init__( self, **kwargs ): super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access", "{'key': 'dimensions', 'type': '[MetricDimension]'}, 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'}, 'source_mdm_account':", "tags: dict[str, str] :param identity: The identities associated to the", "The provisioning state of the private endpoint connection resource. Possible", "type: str self.name = kwargs['name'] class Endpoint(msrest.serialization.Model): \"\"\"The endpoint details.", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} } def __init__( self,", "(null), all video content is retained indefinitely. This property is", "kwargs.get('next_link', None) class VideoFlags(msrest.serialization.Model): \"\"\"Video flags contain information about the", "{ 'type': {'key': 'type', 'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type':", "status: The current status of the Key Vault mapping. 
:vartype", "parameter declarations. Parameters declared here can be referenced throughout the", "kwargs.get('error', None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): \"\"\"Status of private endpoint connection operation.", "the OData error response format.). :param error: The error object.", "kind: Topology kind. Possible values include: \"Live\", \"Batch\". :type kind:", "self.edge_module_id = None class EdgeModuleEntityCollection(msrest.serialization.Model): \"\"\"A collection of EdgeModuleEntity items.", "\"\"\"Encoder processor allows for encoding of the input content. For", "__init__( self, **kwargs ): super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date = kwargs['expiration_date'] class", "= kwargs.get('integration', None) self.ingestion = kwargs.get('ingestion', None) self.consumption = kwargs.get('consumption',", "download URL. This URL can be used in conjunction with", "RsaTokenKey. All required parameters must be populated in order to", "class NodeBase(msrest.serialization.Model): \"\"\"Base class for nodes. You probably want to", "content will not be archived or recorded. 
This is used,", "\"\"\" _validation = { 'log_specifications': {'readonly': True}, 'metric_specifications': {'readonly': True},", "the recipe or instructions on how audio should be processed.", "They are available when the video type is 'archive' and", "'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'}, 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},", "'state': {'key': 'properties.state', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},", "P30D (30 days), content older than 30 days will be", "{ 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'}, } def __init__( self,", "'certificates', 'type': '[str]'}, } def __init__( self, **kwargs ): super(PemCertificateList,", "str self.audio_encoder = kwargs.get('audio_encoder', None) self.video_encoder = kwargs.get('video_encoder', None) class", "'str'}, 'n': {'key': 'n', 'type': 'str'}, 'e': {'key': 'e', 'type':", "True}, 'e': {'required': True}, } _attribute_map = { 'type': {'key':", "send to Azure. :param id: Required. The ID of the", "from a Video Analyzer video resource to be ingested into", "None self.lock_aggregation_type = None self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) self.dimensions =", "content URL as the value for the \"token\" query string", "{ 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} } def __init__( self, **kwargs ):", "set to 'false'. :type disable_archive: str :param disable_rtsp_publishing: When set", "data sources nodes such as an RTSP source which allows", "resource identifier to use when accessing a resource. :type user_assigned_identity:", "created_by: str :param created_by_type: The type of identity that created", "alongside the RTSP messages. Possible values include: \"Http\", \"Tcp\". 
:type", "{'key': 'operation', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, }", "enable_regional_mdm_account: bool :ivar source_mdm_account: The source MDM account. :vartype source_mdm_account:", "in the pipeline which output is used as input of", ":type audiences: list[str] :param claims: List of additional token claims", "'type': 'TunnelBase'}, 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options': {'key': 'validationOptions',", "ranges as a string. You probably want to use the", "= kwargs.get('next_link', None) class PipelineJobError(msrest.serialization.Model): \"\"\"Details about the error for", "Possible values include: \"Pending\", \"Approved\", \"Rejected\". :type status: str or", "None) self.is_data_action = kwargs.get('is_data_action', None) self.action_type = kwargs.get('action_type', None) class", "the module is able to periodically connect to the cloud.", "Identities. :type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] \"\"\" _validation = { 'type':", "The current key used to encrypt Video Analyzer account, including", "the video sink publishes content via the video resource. This", "and not this class directly. Known sub-classes are: EccTokenKey, RsaTokenKey.", "managed identity used by the Video Analyzer resource. Variables are", "encoding of the input content. For example, it can used", "encryption in transit). All required parameters must be populated in", "Possible values include: \"Failed\", \"InProgress\", \"Succeeded\". :vartype provisioning_state: str or", "resources. Variables are only populated by the server, and will", "be ingested into a pipeline. All required parameters must be", "the content can be reused across many different cameras, as", "in the project root for license information. # Code generated", "to send to Azure. :param id: Required. 
The ID of", "self, **kwargs ): super(MetricDimension, self).__init__(**kwargs) self.name = None self.display_name =", "None) self.next_link = kwargs.get('next_link', None) class PipelineTopologyUpdate(ProxyResource): \"\"\"Pipeline topology describes", "agree on a set of authentication keys which will be", "super(PrivateEndpoint, self).__init__(**kwargs) self.id = None class PrivateEndpointConnection(Resource): \"\"\"The Private Endpoint", "self.code = None self.message = None self.target = None self.details", "# type: str self.ranges = kwargs['ranges'] class VideoSink(SinkNodeBase): \"\"\"Video sink", "from 4K to 1280x720. All required parameters must be populated", ":param password: Required. Password to be presented as part of", "{ 'type': {'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type':", "this live pipeline will process content according to the pipeline", "'preset': {'key': 'preset', 'type': 'EncoderPresetBase'}, } def __init__( self, **kwargs", "} def __init__( self, **kwargs ): super(RtspSource, self).__init__(**kwargs) self.type =", "available when the video type is 'archive' and video archiving", "designates that Azure Video Analyzer's list of trusted authorities should", "when uploading content to the archive. Default value is 30", "= '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str class RtspSource(SourceNodeBase): \"\"\"RTSP source allows", "'log_specifications': {'readonly': True}, 'metric_specifications': {'readonly': True}, } _attribute_map = {", "directly. Known sub-classes are: JwtAuthentication. All required parameters must be", "\"\"\" _attribute_map = { 'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'}, 'ingestion':", "self.name = None self.status = None self.error = None class", "list[str] :ivar dimensions: The metric dimensions. :vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar", "referenced be an active pipeline. 
The fact that is being", "kwargs.get('storage_accounts', None) self.endpoints = None self.encryption = kwargs.get('encryption', None) self.iot_hubs", "or generic RTSP server to be ingested into a pipeline.", "set of tags. Resource tags. :type tags: dict[str, str] :param", "'VideoEncoderH264'} } def __init__( self, **kwargs ): super(VideoEncoderBase, self).__init__(**kwargs) self.type", "~video_analyzer.models.VideoAnalyzerEndpointType \"\"\" _validation = { 'type': {'required': True}, } _attribute_map", "{ 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} } def __init__( self,", "the required information for Video Analyzer to connect to RTSP", "A collection of EdgeModuleEntity items. :type value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link:", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to", "{'key': 'systemData', 'type': 'SystemData'}, 'role': {'key': 'properties.role', 'type': 'str'}, 'authentication':", "\"\"\"Video archival properties. :param retention_period: Video retention period indicates the", "{'key': 'systemData', 'type': 'SystemData'}, 'kind': {'key': 'kind', 'type': 'str'}, 'sku':", "omitted, the encoder uses the resolution of the input video.", "Video Analyzer. Videos can be ingested from RTSP cameras through", "Known sub-classes are: TlsEndpoint, UnsecuredEndpoint. All required parameters must be", "~video_analyzer.models.VideoCreationProperties :param video_publishing_options: Options to change how the video sink", "'type': 'object'}, } def __init__( self, **kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs)", "self, **kwargs ): super(UnsecuredEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type:", "real-time content processing. 
When activated, this live pipeline will process", "str :param mode: Describes the video scaling mode to be", "kwargs.get('created_at', None) self.last_modified_by = kwargs.get('last_modified_by', None) self.last_modified_by_type = kwargs.get('last_modified_by_type', None)", ":type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo :param x: Required. X coordinate.", "{'required': True}, 'alg': {'required': True}, 'x': {'required': True}, 'y': {'required':", "): super(Operation, self).__init__(**kwargs) self.name = kwargs['name'] self.display = kwargs.get('display', None)", "= kwargs['password'] class VideoAnalyzer(TrackedResource): \"\"\"The Video Analyzer account. Variables are", "A collection of AccessPolicyEntity items. :type value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link:", "'@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'}", "Required. Name of the built-in encoding preset. Possible values include:", "by the RTSP and RTP exchange: TCP or HTTP. When", "True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'},", "self.type = kwargs['type'] self.user_assigned_identities = kwargs.get('user_assigned_identities', None) class VideoAnalyzerOperationStatus(msrest.serialization.Model): \"\"\"Status", "(data is encrypted in transit). All required parameters must be", "'sku': {'key': 'sku', 'type': 'Sku'}, 'description': {'key': 'properties.description', 'type': 'str'},", "self.x = kwargs['x'] self.y = kwargs['y'] class EdgeModuleEntity(ProxyResource): \"\"\"The representation", "between service consumer and provider. :param status: Indicates whether the", "None) self.ignore_signature = kwargs.get('ignore_signature', None) class TokenClaim(msrest.serialization.Model): \"\"\"Properties for expected", "initialized and authorized to the cloud account. The provisioning token", ":type type: str :param ranges: Required. 
The sequence of datetime", "str] :param identity: The identities associated to the Video Analyzer", "\"\"\"A collection of Operation items. :param value: A collection of", "VideoArchival(msrest.serialization.Model): \"\"\"Video archival properties. :param retention_period: Video retention period indicates", ":type id: str :param identity: Required. The IoT Hub identity.", "the new desired retention period will be effective within 24", "self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str self.inputs = kwargs['inputs'] class", "__init__( self, **kwargs ): super(EncoderSystemPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' #", "): super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str self.ranges", "reason: The reason why the given name is not available.", "video state. All required parameters must be populated in order", "resource. :type last_modified_by: str :param last_modified_by_type: The type of identity", "connection. By default, strict validation is used. :type validation_options: ~video_analyzer.models.TlsValidationOptions", "for the initial handshake between IoT edge module and the", "private link resource group id. :vartype group_id: str :ivar required_members:", "The timestamp of resource creation (UTC). :type created_at: ~datetime.datetime :param", "information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Required. Topology kind. Possible", "= None self.private_endpoint_connections = None class VideoAnalyzerCollection(msrest.serialization.Model): \"\"\"A collection of", "'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},", ":param name: Required. Name of the parameter. :type name: str", "large: High resolution preview image URL. 
:type large: str \"\"\"", "{'key': 'lastModifiedAt', 'type': 'iso-8601'}, } def __init__( self, **kwargs ):", "resource. If archiving is enabled, this results in a video", "500 to 3000 Kbps in increments of 100 Kbps. If", "name of the resource. :vartype name: str :ivar type: The", "True}, } _attribute_map = { 'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},", "\"AlreadyExists\". :type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed reason", "self, **kwargs ): super(Sku, self).__init__(**kwargs) self.name = kwargs['name'] self.tier =", "Possible values include: \"Bytes\", \"Count\", \"Milliseconds\". :vartype unit: str or", "super(TokenKey, self).__init__(**kwargs) self.type = None # type: Optional[str] self.kid =", "\"\"\"The representation of an edge module. Variables are only populated", "**kwargs ): super(PipelineTopology, self).__init__(**kwargs) self.kind = kwargs['kind'] self.sku = kwargs['sku']", "str \"\"\" _validation = { 'type': {'required': True}, 'ranges': {'required':", "defined in the pipelines. All required parameters must be populated", "error, in case the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError", "True}, 'has_data': {'required': True}, 'is_in_use': {'required': True}, } _attribute_map =", "kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state = None self.parameters", "video_encoder: ~video_analyzer.models.VideoEncoderBase \"\"\" _validation = { 'type': {'required': True}, }", "Code generated by Microsoft (R) AutoRest Code Generator. # Changes", "group_id: str :ivar required_members: The private link resource required member", "resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties.", "last modification (UTC). :type last_modified_at: ~datetime.datetime \"\"\" _attribute_map = {", "The type of the endpoint. 
Possible values include: \"ClientApi\". :type", "populated in order to send to Azure. :ivar id: Fully", ":param operation: The operation type. :type operation: str :param description:", "be encoded. If omitted, encoder sets it automatically to try", "RTSP playback URL will not be published, disabling low latency", "private_link_service_connection_state: A collection of information about the state of the", "= '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str self.issuers = kwargs.get('issuers', None) self.audiences", "name of the upstream node in the pipeline which output", "composed of the following: * Parameters: list of user defined", "topologies where \"kind\" is set to \"live\". :type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions", "~video_analyzer.models.PipelineJobError :param parameters: List of the instance level parameter values", "your account. :type bitrate_kbps: int :ivar state: Current state of", "is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase \"\"\" _validation =", "'edge_module_id': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", "to \"live\". :type retention_period: str \"\"\" _attribute_map = { 'title':", "status: str :ivar error: The error details for the live", "operation is performed. :type resource: str :param operation: The operation", "'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = {", ":param actions_required: A message indicating if changes on the service", "storage transactions while increasing the archive playback latency. Value must", ":type display: ~video_analyzer.models.OperationDisplay :param origin: Origin of the operation. :type", "of the credentials. :type username: str :param password: Required. Password", "str \"\"\" _attribute_map = { 'segment_length': {'key': 'segmentLength', 'type': 'str'},", "Elliptical curve algorithm to be used: ES256, ES384 or ES512.", "existing video resource used to capture and publish content. 
Note:", ":vartype expiration_date: ~datetime.datetime :ivar token: The content token value to", "kwargs['name'] self.display = kwargs.get('display', None) self.origin = kwargs.get('origin', None) self.properties", "firewall. :type tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates: List of trusted certificate", "'type': 'OperationDisplay'}, 'origin': {'key': 'origin', 'type': 'str'}, 'properties': {'key': 'properties',", "'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'}, } def __init__( self, **kwargs", "None self.error = None class PipelineJobUpdate(ProxyResource): \"\"\"Pipeline job represents a", "created_by: The identity that created the resource. :type created_by: str", ":ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer account.", "status of an operation on the live pipeline. Variables are", "Required. The geo-location where the resource lives. :type location: str", "be updated at any time and the new desired retention", "'bitrateKbps', 'type': 'str'}, } def __init__( self, **kwargs ): super(AudioEncoderAac,", "\"\"\"Base type for all encoder presets, which define the recipe", "present on the token. :type name: str :param value: Required.", "super(TunnelBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase):", "**kwargs ): super(NodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str]", "be used when they are not defined in the pipelines.", "node in the pipeline which output is used as input", "{'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, 'enable_regional_mdm_account':", "It will retry to re-establish connection (with exponential backoff), checking", "a batch topology, used for offline processing of selected portions", "pipeline data to be analyzed, processed or transformed. 
:type processors:", "self.media_info = kwargs.get('media_info', None) self.archival = kwargs.get('archival', None) class VideoEntityCollection(msrest.serialization.Model):", "'claims', 'type': '[TokenClaim]'}, 'keys': {'key': 'keys', 'type': '[TokenKey]'}, } def", "for all encoder presets, which define the recipe or instructions", "as the module is able to periodically connect to the", "kwargs['alg'] self.x = kwargs['x'] self.y = kwargs['y'] class EdgeModuleEntity(ProxyResource): \"\"\"The", ":param encryption: The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param", "str self.certificates = kwargs['certificates'] class PipelineJob(ProxyResource): \"\"\"Pipeline job represents a", "allow for a topology to be parameterized. This allows individual", "**kwargs ): super(PrivateEndpoint, self).__init__(**kwargs) self.id = None class PrivateEndpointConnection(Resource): \"\"\"The", "{'key': 'bitrateKbps', 'type': 'str'}, 'frame_rate': {'key': 'frameRate', 'type': 'str'}, 'scale':", "\"Canceled\", \"Completed\", \"Failed\". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration:", "{'required': True}, 'username': {'required': True}, 'password': {'required': True}, } _attribute_map", "to 5 minutes, in 30 seconds increments. :type segment_length: str", "Analyzer can connect to the endpoint URL. This is an", "account mapping. :vartype status: str \"\"\" _validation = { 'id':", "= kwargs.get('keys', None) class KeyVaultProperties(msrest.serialization.Model): \"\"\"The details for accessing the", "The name of the resource. :vartype name: str :ivar type:", "self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs.get('identity', None) self.status =", "topology sink nodes. You probably want to use the sub-classes", "for encoding video. 
:type video_encoder: ~video_analyzer.models.VideoEncoderBase \"\"\" _validation = {", "topology, used for real-time ingestion, archiving and publishing of content", "up recorded media within these ranges. :type time_sequences: ~video_analyzer.models.TimeSequenceBase \"\"\"", "{'required': True}, 'kid': {'required': True}, } _attribute_map = { 'type':", "pipeline (read-only). Possible values include: \"Processing\", \"Canceled\", \"Completed\", \"Failed\". :vartype", "'#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} } def __init__( self, **kwargs ): super(EndpointBase, self).__init__(**kwargs)", "Describes the resolution of the encoded video. If omitted, the", "without a version (for example https://vault/keys/mykey). :type key_identifier: str :ivar", "'properties.contentUrls', 'type': 'VideoContentUrls'}, 'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'}, 'archival': {'key':", "type: str class UserAssignedManagedIdentity(msrest.serialization.Model): \"\"\"The details of the user assigned", "True}, 'display_name': {'readonly': True}, 'display_description': {'readonly': True}, 'unit': {'readonly': True},", "= kwargs['inputs'] class Sku(msrest.serialization.Model): \"\"\"The SKU details. Variables are only", "): super(VideoAnalyzerUpdate, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.identity = kwargs.get('identity',", "class SystemData(msrest.serialization.Model): \"\"\"Metadata pertaining to creation and last modification of", "modified the resource. :type last_modified_by: str :param last_modified_by_type: The type", "**kwargs ): super(VideoSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str", "'systemData', 'type': 'SystemData'}, 'kind': {'key': 'kind', 'type': 'str'}, 'sku': {'key':", "The User Assigned Managed Identities. 
:type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] \"\"\"", "True}, 'name': {'required': True}, 'inputs': {'required': True}, } _attribute_map =", "to 24 hours or less. Currently, there can be only", ":param value: A collection of PipelineTopology items. :type value: list[~video_analyzer.models.PipelineTopology]", "**kwargs ): super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id = None class EdgeModuleEntityCollection(msrest.serialization.Model): \"\"\"A", "consumer and provider. :type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The provisioning", "end point. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A collection of", "datetime ranges as a string. The datetime values should follow", "{'key': '@type', 'type': 'str'}, 'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'}, 'video_encoder':", "the owner of the service. Possible values include: \"Pending\", \"Approved\",", "): super(TokenKey, self).__init__(**kwargs) self.type = None # type: Optional[str] self.kid", "Private link DNS zone name. :type required_zone_names: list[str] \"\"\" _validation", "The current status of the Key Vault mapping. :vartype status:", "'VideoSource'} } def __init__( self, **kwargs ): super(SourceNodeBase, self).__init__(**kwargs) self.type", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str self.transport = kwargs.get('transport',", "video content. 
:vartype content_urls: ~video_analyzer.models.VideoContentUrls :param media_info: Contains information about", "and can vary between 1 day to 10 years, in", "'GroupLevelAccessControl'}, } def __init__( self, **kwargs ): super(NetworkAccessControl, self).__init__(**kwargs) self.integration", "'toBeExportedForShoebox', 'type': 'bool'}, } def __init__( self, **kwargs ): super(MetricDimension,", "day increments. When absent (null), all video content is retained", "to an existing pipeline topology. When activated, this pipeline job", "this value is parameterized as a secret string in order", "None self.source_mdm_namespace = None self.supported_time_grain_types = None class NetworkAccessControl(msrest.serialization.Model): \"\"\"Network", "to the Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts:", "can be up to 256 characters long. :type title: str", "the resource for which availability needs to be checked. :type", ":vartype display_name: str :ivar blob_duration: The time range for requests", "Video Analyzer account. Possible values include: \"Enabled\", \"Disabled\". :type public_network_access:", ":param alg: Required. RSA algorithm to be used: RS256, RS384", "latencies which are approximately double of the chosen video segment", "type is 'archive' and a live, low-latency feed is available", "'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'}, 'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'},", "__init__( self, **kwargs ): super(Endpoint, self).__init__(**kwargs) self.endpoint_url = kwargs.get('endpoint_url', None)", "~video_analyzer.models.SystemData :param kind: Required. Topology kind. Possible values include: \"Live\",", "DNS zone name. :type required_zone_names: list[str] \"\"\" _validation = {", "True}, 'system_data': {'readonly': True}, 'group_id': {'readonly': True}, 'required_members': {'readonly': True},", "an edge module. 
Variables are only populated by the server,", "_attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, } def", "job will be automatically deleted from your account. :vartype expiration:", ":type mode: str or ~video_analyzer.models.VideoScaleMode \"\"\" _attribute_map = { 'height':", "processor allows for encoding of the input content. For example,", "details for accessing the encryption keys in Key Vault. Variables", "token. :type name: str :param value: Required. Expected value of", "of nodes which perform data analysis or transformations. * Sinks:", "None self.principal_id = None class UsernamePasswordCredentials(CredentialsBase): \"\"\"Username and password credentials.", "to be kept in storage. It must be provided in", "on the JWT token header. :type kid: str :param alg:", "DASH CMAF: /manifest(format=mpd-time-cmaf) Moreover, an ongoing video recording can be", "Possible values include: \"Standard\". :vartype tier: str or ~video_analyzer.models.SkuTier \"\"\"", "when accessing a resource. :type user_assigned_identity: str \"\"\" _validation =", "{ 'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'} } def __init__( self,", "existing captured video through a pipeline job. Videos ingested through", "True}, 'dimensions': {'readonly': True}, 'enable_regional_mdm_account': {'readonly': True}, 'source_mdm_account': {'readonly': True},", "Key Vault key used to encrypt the account. The key", "of the connection between service consumer and provider. :type private_link_service_connection_state:", "token: The content token value to be added to the", "\"\"\"Pipeline job represents a unique instance of a batch topology,", "items. :param value: A collection of LivePipeline items. :type value:", "module. 
Variables are only populated by the server, and will", "public network access is allowed for resources under the Video", "the tunnel through which Video Analyzer can connect to the", "Manager tracked top level resource which has 'tags' and a", "preset for encoding video with the H.264 (AVC) codec. All", "None self.media_info = kwargs.get('media_info', None) self.archival = kwargs.get('archival', None) class", "'has_data': {'required': True}, 'is_in_use': {'required': True}, } _attribute_map = {", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an", "and the sum of the ranges should add up to", "The identity that created the resource. :type created_by: str :param", "kwargs.get('video_publishing_options', None) class VideoSource(SourceNodeBase): \"\"\"Video source allows for content from", "'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} } def __init__( self, **kwargs ): super(TunnelBase,", "\"\"\"Username and password credentials. All required parameters must be populated", "\"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly': True},", ":type can_stream: bool :param has_data: Required. Value indicating whether or", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} } def __init__( self,", "List of the topology processor nodes. Processor nodes enable pipeline", "trusted_certificates: List of trusted certificate authorities when authenticating a TLS", "video height. 
:type height: str :param width: The desired output", "'client_id': {'readonly': True}, 'principal_id': {'readonly': True}, } _attribute_map = {", "super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type = kwargs['type'] self.user_assigned_identities = kwargs.get('user_assigned_identities', None) class", "= { 'name': {'required': True}, 'value': {'required': True}, } _attribute_map", "gated on events or camera may not be accessible at", "is now below the reserved capacity. Doing so will ensure", "input video. :type frame_rate: str :param scale: Describes the resolution", "order to send to Azure. :param id: Required. The IoT", "indicating whether or not the video is currently being referenced", "change the resolution from 4K to 1280x720. All required parameters", "None) self.next_link = kwargs.get('next_link', None) class VideoFlags(msrest.serialization.Model): \"\"\"Video flags contain", "module twin properties. :vartype token: str \"\"\" _validation = {", "created the resource. Possible values include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\".", "'#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str class VideoEntity(ProxyResource): \"\"\"Represents a video resource", "in ISO8601 format (eg. 2021-01-01T00:00:00Z). :vartype expiration_date: ~datetime.datetime :ivar token:", "'iot_hub_name': {'required': True}, 'device_id': {'required': True}, } _attribute_map = {", "the token. :type value: str \"\"\" _validation = { 'name':", "The desired output video height. :type height: str :param width:", "None) self.created_by_type = kwargs.get('created_by_type', None) self.created_at = kwargs.get('created_at', None) self.last_modified_by", "'status', 'type': 'str'}, } def __init__( self, **kwargs ): super(StorageAccount,", "recipe or instructions on how the input content should be", "processing of selected portions of archived content. 
Variables are only", "class AudioEncoderAac(AudioEncoderBase): \"\"\"A custom preset for encoding audio with the", "None) class ParameterDefinition(msrest.serialization.Model): \"\"\"Defines the parameter value of an specific", "ignore_hostname: When set to 'true' causes the certificate subject name", "increasing the archive playback latency. Value must be specified in", "Azure. :param id: Required. The ID of the storage account", "describes the processing steps to be applied when processing content", "access tokens. Having multiple keys allow for seamless key rotation", "with the video content authorization token to download the video", "to send to Azure. :param node_name: Required. The name of", "topology allows for video and audio to be captured, optionally", "endpoint details. All required parameters must be populated in order", "aggregation types. :type supported_aggregation_types: list[str] :ivar dimensions: The metric dimensions.", "download the most recent still image from the video archive", "new video resource needs to be created on the service.", "'SystemData'}, 'role': {'key': 'properties.role', 'type': 'str'}, 'authentication': {'key': 'properties.authentication', 'type':", "metric lock aggregation type. Possible values include: \"Average\", \"Count\", \"Total\".", "self.type = None # type: Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model): \"\"\"The check", "description: str :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved", "or exported. :type sinks: list[~video_analyzer.models.SinkNodeBase] \"\"\" _validation = { 'id':", "is set to \"live\". 
:type retention_period: str \"\"\" _attribute_map =", "{ 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} } def __init__( self, **kwargs ):", "'object'}, } def __init__( self, **kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type", "value to be returned as part of the resource on", "available when the video type is 'archive' and preview images", "over clear transport (no encryption in transit). All required parameters", "content type. Different content types are suitable for different applications", "values include: \"Succeeded\", \"Creating\", \"Deleting\", \"Failed\". :vartype provisioning_state: str or", "return error details for failed operations. (This also follows the", ":type default: str \"\"\" _validation = { 'name': {'required': True},", "self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str self.preset = kwargs['preset'] class", "in individual instances of the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration] :param", "= { 'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} } def __init__( self, **kwargs", "or ~video_analyzer.models.VideoAnalyzerEndpointType \"\"\" _validation = { 'type': {'required': True}, }", "captured video through a pipeline job. Videos ingested through live", "'certificates': {'key': 'certificates', 'type': '[str]'}, } def __init__( self, **kwargs", "self.small = kwargs.get('small', None) self.medium = kwargs.get('medium', None) self.large =", "self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class AccountEncryption(msrest.serialization.Model):", "class UnsecuredEndpoint(EndpointBase): \"\"\"Unsecured endpoint describes an endpoint that the pipeline", "'type': {'key': 'type', 'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},", "error code. 
:type code: str :param message: The error message.", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[PrivateLinkResource]'}, }", "be streamed. Only \"archive\" type videos can be streamed. :type", "individual cameras' RTSP endpoints and credentials. Overall a topology is", "'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key':", "mapping. :vartype status: str \"\"\" _validation = { 'id': {'required':", "data to be stored or exported. :type sinks: list[~video_analyzer.models.SinkNodeBase] \"\"\"", "super(SourceNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str class RtspSource(SourceNodeBase):", "self.value = kwargs['value'] class TrackedResource(Resource): \"\"\"The resource model definition for", "self).__init__(**kwargs) self.small = kwargs.get('small', None) self.medium = kwargs.get('medium', None) self.large", "{'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type':", "bitrate of the input audio is used. :type bitrate_kbps: str", "types.Constant filled by server. :type type: str :param name: Required.", "and a location. Variables are only populated by the server,", "List of metric specifications. :vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] \"\"\" _validation =", "\"\"\"Single topology parameter declaration. Declared parameters can and must be", "used as the source. :type video_name: str :param time_sequences: Required.", "__init__( self, **kwargs ): super(AudioEncoderAac, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' #", "video archiving is enabled. 
:type archive_base_url: str :param rtsp_tunnel_url: Video", "} def __init__( self, **kwargs ): super(ErrorDetail, self).__init__(**kwargs) self.code =", "None) self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints", "# type: str self.iot_hub_name = kwargs['iot_hub_name'] self.device_id = kwargs['device_id'] class", "= kwargs.get('parameters', None) class LivePipelineCollection(msrest.serialization.Model): \"\"\"A collection of LivePipeline items.", "dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates whether regional MDM account is", "str :param password: Required. Password to be presented as part", ":type inputs: list[~video_analyzer.models.NodeInput] :param video_name: Required. Name of a new", "'str'}, } def __init__( self, **kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type", "'video_name': {'key': 'videoName', 'type': 'str'}, 'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'},", "camera. It will retry to re-establish connection (with exponential backoff),", "'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},", "encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for this resource.", "str :param identity: A managed identity that Video Analyzer will", "= { 'value': {'key': 'value', 'type': '[AccessPolicyEntity]'}, 'next_link': {'key': '@nextLink',", "_attribute_map = { 'id': {'key': 'id', 'type': 'str'}, } def", "identity type. :type type: str :param user_assigned_identities: The User Assigned", "None self.name = None self.type = None self.system_data = None", ":ivar display_name: The display name for the dimension. :vartype display_name:", "public key modulus. :type n: str :param e: Required. 
RSA", "= kwargs.get('disable_archive', None) self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None) class VideoScale(msrest.serialization.Model): \"\"\"The", "type_properties_type: str or ~video_analyzer.models.VideoType :ivar flags: Video flags contain information", "of the IoT Hub. :type iot_hub_name: str :param device_id: Required.", "None) self.action_type = kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model): \"\"\"A collection of", "self).__init__(**kwargs) self.can_stream = kwargs['can_stream'] self.has_data = kwargs['has_data'] self.is_in_use = kwargs['is_in_use']", "224, and 256. If omitted, the bitrate of the input", "video already exists. :param title: Optional title provided by the", "description: Optional video description provided by the user. Value can", "referenced topology. Topology parameters without a default value must be", "must be initialized and connected to the Internet prior to", "'state': {'key': 'properties.state', 'type': 'str'}, 'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},", "Azure. :ivar id: Fully qualified resource ID for the resource.", "self.video_name = kwargs['video_name'] self.video_creation_properties = kwargs.get('video_creation_properties', None) self.video_publishing_options = kwargs.get('video_publishing_options',", "order to send to Azure. :param type: Required. The type", "class Resource(msrest.serialization.Model): \"\"\"Common fields that are returned in the response", "'str'}, 'kid': {'key': 'kid', 'type': 'str'}, 'alg': {'key': 'alg', 'type':", "absolute datetime ranges as a string. The datetime values should", "(JWT). All required parameters must be populated in order to", "certificates: Required. PEM formatted public certificates. 
One certificate per entry.", "flags contain information about the available video actions and its", "'type': '[Operation]'}, } def __init__( self, **kwargs ): super(OperationCollection, self).__init__(**kwargs)", "PipelineTopologyCollection(msrest.serialization.Model): \"\"\"A collection of PipelineTopology items. :param value: A collection", ":type type: str :param certificates: Required. PEM formatted public certificates.", "name: str :param id: Operation resource ID. :type id: str", "None) class VideoAnalyzerUpdate(msrest.serialization.Model): \"\"\"The update operation for a Video Analyzer", "can be created by exporting sequences from existing captured video", "{'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, } def __init__( self, **kwargs ):", "derived types.Constant filled by server. :type type: str \"\"\" _validation", "kwargs['expiration_date'] class LivePipeline(ProxyResource): \"\"\"Live pipeline represents a unique instance of", "\"\"\"RTSP source allows for media from an RTSP camera or", "'value': {'key': 'value', 'type': '[Operation]'}, } def __init__( self, **kwargs", "{'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList':", "scenario to be achieved and can be reused across many", "access level granted by this policy. Possible values include: \"Reader\".", "super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date = None self.token = None class EncoderPresetBase(msrest.serialization.Model):", "'false'. :type ignore_hostname: str :param ignore_signature: When set to 'true'", "'systemData', 'type': 'SystemData'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key':", "Azure Video Analyzer. Videos can be ingested from RTSP cameras", "network_access_control: Network access control for Video Analyzer. 
:type network_access_control: ~video_analyzer.models.NetworkAccessControl", "list[~video_analyzer.models.Operation] \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'},", "'type': 'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, } _subtype_map =", ":type description: str :param parameters: List of the topology parameter", "'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},", "resources. :param value: Array of private link resources. :type value:", "claims: list[~video_analyzer.models.TokenClaim] :param keys: List of keys which can be", ":ivar state: Current state of the pipeline (read-only). Possible values", "mapping. :vartype status: str \"\"\" _validation = { 'type': {'required':", "CMAF: /manifest(format=m3u8-cmaf) - DASH CMAF: /manifest(format=mpd-time-cmaf) Moreover, an ongoing video", "generated with Elliptical Curve algorithm. All required parameters must be", "audio content. :param segment_length: Video segment length indicates the length", "kwargs.get('origin', None) self.properties = kwargs.get('properties', None) self.is_data_action = kwargs.get('is_data_action', None)", "group_id: The private link resource group id. :vartype group_id: str", "msrest.serialization class Resource(msrest.serialization.Model): \"\"\"Common fields that are returned in the", "through which Video Analyzer can connect to the endpoint URL.", "description: Optional description provided by the user. 
Value can be", "= kwargs['identity'] self.status = None class JwtAuthentication(AuthenticationBase): \"\"\"Properties for access", "= None class AudioEncoderBase(msrest.serialization.Model): \"\"\"Base type for all audio encoder", "frame_rate: The frame rate (in frames per second) of the", "super(VideoEncoderBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.bitrate_kbps =", "can have optional default values and can later be defined", "= None self.media_info = kwargs.get('media_info', None) self.archival = kwargs.get('archival', None)", "= kwargs.get('authentication', None) class AccessPolicyEntityCollection(msrest.serialization.Model): \"\"\"A collection of AccessPolicyEntity items.", "'type': {'key': '@type', 'type': 'str'}, 'iot_hub_name': {'key': 'iotHubName', 'type': 'str'},", "which output is used as input of the current node.", "} def __init__( self, **kwargs ): super(NetworkAccessControl, self).__init__(**kwargs) self.integration =", "'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)", "The error detail. :type error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = {", "not be archived or recorded. This is used, for example,", "target: The error target. :vartype target: str :ivar details: The", "operation for a Video Analyzer account. Variables are only populated", "account is (optionally) encrypted. Variables are only populated by the", "'properties.state', 'type': 'str'}, 'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'}, 'error': {'key':", "of the input audio is used. :type bitrate_kbps: str \"\"\"", ":type id: str :param start_time: Operation start time. :type start_time:", "self.type = kwargs['type'] self.key_vault_properties = kwargs.get('key_vault_properties', None) self.identity = kwargs.get('identity',", "indefinitely. 
This property is only allowed for topologies where \"kind\"", "the connection has been Approved/Rejected/Removed by the owner of the", "Possible values include: \"SingleLayer_540p_H264_AAC\", \"SingleLayer_720p_H264_AAC\", \"SingleLayer_1080p_H264_AAC\", \"SingleLayer_2160p_H264_AAC\". :type name: str", "key used to encrypt the account. The key may either", "self.user_assigned_identities = kwargs.get('user_assigned_identities', None) class VideoAnalyzerOperationStatus(msrest.serialization.Model): \"\"\"Status of video analyzer", "to be applied. Default mode is 'Pad'. If the mode", "PemCertificateList. All required parameters must be populated in order to", "{'key': 'value', 'type': 'str'}, } def __init__( self, **kwargs ):", "RTSP camera exceeds this capacity, then the service will disconnect", "queues, and blobs. The primary storage account must be a", ":vartype details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The error additional info. :vartype", "= { 'value': {'key': 'value', 'type': '[EdgeModuleEntity]'}, 'next_link': {'key': '@nextLink',", "{ 'expiration_date': {'readonly': True}, 'token': {'readonly': True}, } _attribute_map =", "must match exactly one key. :type keys: list[~video_analyzer.models.TokenKey] \"\"\" _validation", "allowed for resources under the Video Analyzer account. 
Possible values", "{'readonly': True}, 'source_mdm_namespace': {'readonly': True}, 'supported_time_grain_types': {'readonly': True}, } _attribute_map", "TlsEndpoint(EndpointBase): \"\"\"TLS endpoint describes an endpoint that the pipeline can", "{'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers':", "str :ivar system_data: Azure Resource Manager metadata containing createdBy and", "{'readonly': True}, } _attribute_map = { 'log_specifications': {'key': 'logSpecifications', 'type':", "'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'video_name': {'key': 'videoName',", "None self.expiration = None self.error = None self.parameters = kwargs.get('parameters',", "source nodes. You probably want to use the sub-classes and", "resource used to capture and publish content. Note: if downstream", "self.type = None # type: Optional[str] class TlsEndpoint(EndpointBase): \"\"\"TLS endpoint", "the H.264 (AVC) codec. All required parameters must be populated", "str :param start_time: Operation start time. :type start_time: str :param", "__init__( self, **kwargs ): super(AudioEncoderBase, self).__init__(**kwargs) self.type = None #", "for seamless key rotation of the token signing key. Token", "to be provided to the Azure Video Analyzer IoT edge", "is 'false'. :type ignore_hostname: str :param ignore_signature: When set to", "hours. :type retention_period: str \"\"\" _attribute_map = { 'retention_period': {'key':", "'canStream', 'type': 'bool'}, 'has_data': {'key': 'hasData', 'type': 'bool'}, 'is_in_use': {'key':", "kwargs['certificates'] class PipelineJob(ProxyResource): \"\"\"Pipeline job represents a unique instance of", "'system_data': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = {", "the video MP4 file. The resulting MP4 file can be", "be analyzed, processed or transformed. 
:type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks:", ":param id: Required. The ID of the storage account resource.", "'message': {'readonly': True}, 'target': {'readonly': True}, 'details': {'readonly': True}, 'additional_info':", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} } def __init__(", "details about the associated storage account. Variables are only populated", "= None self.display_name = None self.to_be_exported_for_shoebox = None class MetricSpecification(msrest.serialization.Model):", "str \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[PipelineTopology]'},", "Edge module twin properties. :vartype token: str \"\"\" _validation =", "provided in the ISO8601 duration format in the granularity of", "'RsaTokenKey'} } def __init__( self, **kwargs ): super(TokenKey, self).__init__(**kwargs) self.type", "single instance of Azure Video analyzer IoT edge module to", "a particular outcome. The topology should be defined according to", "self.message = None self.target = None self.details = None self.additional_info", "def __init__( self, **kwargs ): super(PrivateLinkResource, self).__init__(**kwargs) self.group_id = None", "referenced throughout the topology and can optionally have default values", "of storage transactions while increasing the archive playback latency. Value", "True}, 'source_mdm_account': {'readonly': True}, 'source_mdm_namespace': {'readonly': True}, 'supported_time_grain_types': {'readonly': True},", "be specified. Else if the mode is 'PreserveAspectRatio' then only", "kwargs.get('keys', None) class KeyVaultProperties(msrest.serialization.Model): \"\"\"The details for accessing the encryption", "in one response). :type next_link: str \"\"\" _attribute_map = {", "or ~video_analyzer.models.PublicNetworkAccess \"\"\" _attribute_map = { 'public_network_access': {'key': 'publicNetworkAccess', 'type':", "service consumer and provider. 
:param status: Indicates whether the connection", "True}, 'username': {'required': True}, 'password': {'required': True}, } _attribute_map =", "content URLs.\". Variables are only populated by the server, and", "should be processed. :type preset: ~video_analyzer.models.EncoderPresetBase \"\"\" _validation = {", "= None class ErrorResponse(msrest.serialization.Model): \"\"\"Common error response for all Azure", "kwargs['preset'] class EncoderSystemPreset(EncoderPresetBase): \"\"\"Describes a built-in preset for encoding the", "kwargs.get('created_by_type', None) self.created_at = kwargs.get('created_at', None) self.last_modified_by = kwargs.get('last_modified_by', None)", "\"\"\"Contains information about the video and audio content. :param segment_length:", "'type': {'readonly': True}, 'system_data': {'readonly': True}, 'edge_module_id': {'readonly': True}, }", "= None self.content_urls = None self.media_info = kwargs.get('media_info', None) self.archival", "'enableRegionalMdmAccount', 'type': 'bool'}, 'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'}, 'source_mdm_namespace': {'key':", "'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self,", "use to access the storage account. :type identity: ~video_analyzer.models.ResourceIdentity :ivar", "to be presented as part of the credentials. :type username:", "Network access control for Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar", "None) self.state = None self.expiration = None self.error = None", "video resource needs to be created on the service. :type", "str :param description: An optional description for the pipeline. 
:type", "self, **kwargs ): super(ProcessorNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type:", "'bool'}, } def __init__( self, **kwargs ): super(VideoFlags, self).__init__(**kwargs) self.can_stream", "): super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity = kwargs['user_assigned_identity'] class RsaTokenKey(TokenKey): \"\"\"Required validation", "{'key': 'type', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'default':", "= '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates = kwargs['certificates'] class PipelineJob(ProxyResource):", "): super(VideoArchival, self).__init__(**kwargs) self.retention_period = kwargs.get('retention_period', None) class VideoContentToken(msrest.serialization.Model): \"\"\"\"Video", "video_name: Required. Name of the Video Analyzer video resource to", "to errors when uploading content to the archive. Default value", "greater than zero, and less than or equal to 300.", "be present on the token. :type name: str :param value:", "super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username =", "'[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(VideoAnalyzer, self).__init__(**kwargs) self.identity", "'type': 'VideoEncoderBase'}, } def __init__( self, **kwargs ): super(EncoderCustomPreset, self).__init__(**kwargs)", "will retry to re-establish connection (with exponential backoff), checking to", "of the endpoint. Possible values include: \"ClientApi\". :type type: str", "Possible values include: \"RS256\", \"RS384\", \"RS512\". :type alg: str or", "} _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name':", "self.to_be_exported_for_shoebox = None class MetricSpecification(msrest.serialization.Model): \"\"\"A metric emitted by service.", "Azure. :param endpoint_url: The URL of the endpoint. 
:type endpoint_url:", "True}, 'aggregation_type': {'readonly': True}, 'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly': True},", "Required. Name of the IoT Hub. :type iot_hub_name: str :param", "height. :type height: str :param width: The desired output video", "directly. Known sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All required parameters", "live pipeline operation. :vartype status: str :ivar error: The error", "video is published. These are only allowed for topologies where", ":param code: The error code. :type code: str :param message:", "'device_id': {'key': 'deviceId', 'type': 'str'}, } def __init__( self, **kwargs", "= None # type: Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model): \"\"\"The check availability", "'str'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type':", "self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class LivePipelineOperationStatus(msrest.serialization.Model):", "token grants access to the video content URLs.\". Variables are", "have this value set to false. :type has_data: bool :param", ":param endpoint_url: The URL of the endpoint. :type endpoint_url: str", "parameterized as a secret string in order to prevent this", "'is_in_use': {'required': True}, } _attribute_map = { 'can_stream': {'key': 'canStream',", "transport (data is encrypted in transit). All required parameters must", "resource. E.g. \"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\". :vartype type: str :ivar system_data:", "video through a pipeline job. 
Videos ingested through live pipelines", "'systemData', 'type': 'SystemData'}, } def __init__( self, **kwargs ): super(Resource,", "{'required': True}, 'is_in_use': {'required': True}, } _attribute_map = { 'can_stream':", "resulting MP4 file can be played on any standard media", "self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = kwargs.get('provider', None) self.resource", "many different cameras, as long as the same processing is", "header. :type kid: str :param alg: Required. RSA algorithm to", "provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections created", "): super(PipelineTopology, self).__init__(**kwargs) self.kind = kwargs['kind'] self.sku = kwargs['sku'] self.description", "status: The status of the live pipeline operation. :vartype status:", "of the resource on API requests. :type password: str \"\"\"", ":param display: The operation display name. :type display: ~video_analyzer.models.OperationDisplay :param", "and publishing of content for a unique RTSP camera. Variables", "None) class LivePipelineOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status of an", "RTSP camera or generic RTSP server to be ingested into", "sub-classes and not this class directly. Known sub-classes are: VideoSink.", "class VideoAnalyzer(TrackedResource): \"\"\"The Video Analyzer account. Variables are only populated", "'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} } def __init__( self, **kwargs ): super(NodeBase,", ":vartype unit: str or ~video_analyzer.models.MetricUnit :ivar aggregation_type: The metric aggregation", "Known sub-classes are: PemCertificateList. All required parameters must be populated", "\"\"\" _validation = { 'code': {'readonly': True}, 'message': {'readonly': True},", "Larger segments reduce the amount of storage transactions while increasing", "pipeline. 
:type value: str \"\"\" _validation = { 'name': {'required':", "directly. Known sub-classes are: UsernamePasswordCredentials. All required parameters must be", "'#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str self.audio_encoder = kwargs.get('audio_encoder', None) self.video_encoder =", "{'key': 'bitrateKbps', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac':", "'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, 'trusted_certificates': {'key':", ":vartype source_mdm_account: str :ivar source_mdm_namespace: The source MDM namespace. :vartype", "is 'archive' and video archiving is enabled. :type archive_base_url: str", "None) class CheckNameAvailabilityResponse(msrest.serialization.Model): \"\"\"The check availability result. :param name_available: Indicates", "default, strict validation is used. :type validation_options: ~video_analyzer.models.TlsValidationOptions \"\"\" _validation", "class ErrorResponse(msrest.serialization.Model): \"\"\"Common error response for all Azure Resource Manager", "{'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'provisioning_state': {'readonly':", "containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar edge_module_id:", "'description', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationDisplay,", "'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'role': {'key': 'properties.role', 'type':", "with Elliptical Curve algorithm. 
All required parameters must be populated", "self.error = None class LivePipelineUpdate(ProxyResource): \"\"\"Live pipeline represents a unique", "through the use of user-defined parameters, which allow for a", "'nameAvailable', 'type': 'bool'}, 'reason': {'key': 'reason', 'type': 'str'}, 'message': {'key':", "'type': 'str'}, 'e': {'key': 'e', 'type': 'str'}, } def __init__(", "the video type is 'file' and video file is available", "PrivateEndpointConnectionListResult(msrest.serialization.Model): \"\"\"List of private endpoint connection associated with the specified", "operation applies to data-plane. :type is_data_action: bool :param action_type: Indicates", "def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None", "supported_time_grain_types: The supported time grain types. :vartype supported_time_grain_types: list[str] \"\"\"", "availability needs to be checked. :type name: str :param type:", ":ivar id: Fully qualified resource ID for the resource. Ex", "metric dimension name. :vartype name: str :ivar display_name: The display", "self.name = kwargs['name'] self.id = kwargs.get('id', None) self.start_time = kwargs.get('start_time',", "algorithm to be used: RS256, RS384 or RS512. Possible values", "any compatible DASH or HLS players by appending the following", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} } def __init__(", "be used: RS256, RS384 or RS512. 
Possible values include: \"RS256\",", "'claims': {'key': 'claims', 'type': '[TokenClaim]'}, 'keys': {'key': 'keys', 'type': '[TokenKey]'},", "= { 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} }", "None class ListProvisioningTokenInput(msrest.serialization.Model): \"\"\"The input parameters to generate registration token", "set to 'true', then \"disableRtspPublishing\" must be set to 'false'.", "expected token claims. All required parameters must be populated in", "the archive playback latency. Value must be specified in ISO8601", "are suitable for different applications and scenarios. Possible values include:", "{'key': 'systemData', 'type': 'SystemData'}, 'title': {'key': 'properties.title', 'type': 'str'}, 'description':", "PipelineJob items. :type value: list[~video_analyzer.models.PipelineJob] :param next_link: A link to", "When set to 'true' content will not be archived or", "collection of Operation items. :type value: list[~video_analyzer.models.Operation] \"\"\" _attribute_map =", "endpoint URL. This is an optional property, typically used when", "= kwargs['n'] self.e = kwargs['e'] class SourceNodeBase(NodeBase): \"\"\"Base class for", "and not this class directly. Known sub-classes are: SecureIotDeviceRemoteTunnel. All", "on the service provider require any updates on the consumer.", "to send to Azure. :param endpoint_url: The URL of the", "str self.alg = kwargs['alg'] self.n = kwargs['n'] self.e = kwargs['e']", "{'required': True}, 'password': {'required': True}, } _attribute_map = { 'type':", "'private_endpoint_connections': {'readonly': True}, } _attribute_map = { 'tags': {'key': 'tags',", "info: any \"\"\" _validation = { 'type': {'readonly': True}, 'info':", "uploading content to the archive. 
Default value is 30 seconds.", "super(CertificateSource, self).__init__(**kwargs) self.type = None # type: Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model):", "account. Possible values include: \"Failed\", \"InProgress\", \"Succeeded\". :vartype provisioning_state: str", "self.target = None self.details = None self.additional_info = None class", "URLs.\". Variables are only populated by the server, and will", "and not this class directly. Known sub-classes are: RtspSource, VideoSource.", "self.metric_specifications = None class SinkNodeBase(NodeBase): \"\"\"Base class for topology sink", "\"Disabled\". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess \"\"\" _attribute_map = {", "self.name = kwargs.get('name', None) self.type = kwargs.get('type', None) class CheckNameAvailabilityResponse(msrest.serialization.Model):", "display name for the dimension. :vartype display_name: str :ivar to_be_exported_for_shoebox:", "\"\"\" _validation = { 'id': {'required': True}, 'identity': {'required': True},", "network access is allowed for resources under the Video Analyzer", "'rtspTunnelUrl', 'type': 'str'}, 'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'}, } def", "Resource Manager resources. Variables are only populated by the server,", "and its dynamic properties based on the current video state.", "default values to be used when they are not defined", "be stored or exported. :type sinks: list[~video_analyzer.models.SinkNodeBase] \"\"\" _validation =", "endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL", "\"Live\", \"Batch\". :type kind: str or ~video_analyzer.models.Kind :param sku: Required.", "None self.message = None self.target = None self.details = None", "sub-classes are: EccTokenKey, RsaTokenKey. All required parameters must be populated", "accounts for this resource. 
:type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The", "given name is available. :type message: str \"\"\" _attribute_map =", "filled by server. :type type: str :param credentials: Required. Credentials", "'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name", "type: str or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The properties of the", "'width': {'key': 'width', 'type': 'str'}, 'mode': {'key': 'mode', 'type': 'str'},", "perform data analysis or transformations. * Sinks: list of one", ":type description: str :ivar state: Current state of the pipeline", "access control. :param public_network_access: Whether or not public network access", "segments provide lower archive playback latency but generate larger volume", "video. The value must be greater than zero, and less", "__init__( self, **kwargs ): super(EndpointBase, self).__init__(**kwargs) self.type = None #", "Maximum bitrate capacity in Kbps reserved for the live pipeline.", "derived types.Constant filled by server. :type type: str :param credentials:", "'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'type_properties_type': {'key': 'properties.type',", "{'readonly': True}, 'message': {'readonly': True}, 'target': {'readonly': True}, 'details': {'readonly':", "_attribute_map = { 'node_name': {'key': 'nodeName', 'type': 'str'}, } def", "{'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} } def __init__( self, **kwargs ): super(SinkNodeBase, self).__init__(**kwargs)", "self.last_modified_by = kwargs.get('last_modified_by', None) self.last_modified_by_type = kwargs.get('last_modified_by_type', None) self.last_modified_at =", "discriminator for derived types.Constant filled by server. 
:type type: str", "'videoPublishingOptions', 'type': 'VideoPublishingOptions'}, } def __init__( self, **kwargs ): super(VideoSink,", "the RTSP messages are exchanged through long lived HTTP connections,", "{'key': 'value', 'type': '[AccessPolicyEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, }", "kwargs.get('key_vault_properties', None) self.identity = kwargs.get('identity', None) self.status = None class", "name. :vartype name: str :ivar display_name: The display name for", "'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'}, } def __init__( self, **kwargs", "self.name = None self.display_name = None self.display_description = None self.unit", "} def __init__( self, **kwargs ): super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity =", "'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'title': {'key':", "'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'}, } def __init__( self, **kwargs", "'type': 'str'}, } def __init__( self, **kwargs ): super(LivePipelineCollection, self).__init__(**kwargs)", "one of the given values. :type issuers: list[str] :param audiences:", "# type: str self.issuers = kwargs.get('issuers', None) self.audiences = kwargs.get('audiences',", "self, **kwargs ): super(RsaTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type:", "policies authentication methods. You probably want to use the sub-classes", "{'key': 'properties.type', 'type': 'str'}, 'flags': {'key': 'properties.flags', 'type': 'VideoFlags'}, 'content_urls':", "type of the endpoint. Possible values include: \"ClientApi\". :type type:", "super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type = None self.info = None class ErrorDetail(msrest.serialization.Model):", "lock aggregation type. Possible values include: \"Average\", \"Count\", \"Total\". 
:vartype", "def __init__( self, **kwargs ): super(SystemData, self).__init__(**kwargs) self.created_by = kwargs.get('created_by',", "for resources under the Video Analyzer account. Possible values include:", ":param description: The reason for approval/rejection of the connection. :type", "the initial handshake, the IoT edge module will agree on", "{'readonly': True}, } _attribute_map = { 'service_specification': {'key': 'serviceSpecification', 'type':", "str self.alg = kwargs['alg'] self.x = kwargs['x'] self.y = kwargs['y']", "it is only used for the initial handshake between IoT", "{ 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True},", "self).__init__(**kwargs) self.error = kwargs.get('error', None) class GroupLevelAccessControl(msrest.serialization.Model): \"\"\"Group level network", "The diagnostic log category name. :vartype name: str :ivar display_name:", "None) self.status = None class AudioEncoderBase(msrest.serialization.Model): \"\"\"Base type for all", "bitrate capacity in Kbps reserved for the live pipeline. The", "created by exporting sequences from existing captured video through a", "be populated in order to send to Azure. :param key_identifier:", "'type': 'str'}, 'principal_id': {'key': 'principalId', 'type': 'str'}, } def __init__(", "self, **kwargs ): super(TokenKey, self).__init__(**kwargs) self.type = None # type:", "\"\"\"The endpoint details. All required parameters must be populated in", "'type': 'str'}, 'flags': {'key': 'properties.flags', 'type': 'VideoFlags'}, 'content_urls': {'key': 'properties.contentUrls',", "status: The current status of the Iot Hub mapping. :vartype", "parameters, which allow for a topology to be parameterized. This", "service. Variables are only populated by the server, and will", "from your account. 
:vartype expiration: ~datetime.datetime :ivar error: Details about", "): super(Properties, self).__init__(**kwargs) self.service_specification = None class ResourceIdentity(msrest.serialization.Model): \"\"\"The user", "example https://vault/keys/mykey). :type key_identifier: str :ivar current_key_identifier: The current key", "id. Validation keys are looked up based on the key", "value: A collection of VideoAnalyzer items. :type value: list[~video_analyzer.models.VideoAnalyzer] \"\"\"", "} def __init__( self, **kwargs ): super(EccTokenKey, self).__init__(**kwargs) self.type =", ":ivar token: The content token value to be added to", "**kwargs ): super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class VideoAnalyzerIdentity(msrest.serialization.Model):", "collection of PipelineJob items. :type value: list[~video_analyzer.models.PipelineJob] :param next_link: A", "in Kbps reserved for the live pipeline. The allowed range", "self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.bitrate_kbps", "send to Azure. :param type: Required. The identity type. :type", "__init__( self, **kwargs ): super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name = kwargs.get('name', None)", "def __init__( self, **kwargs ): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel'", "the input video. :type bitrate_kbps: str :param frame_rate: The frame", "__init__( self, **kwargs ): super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' #", "kwargs.get('message', None) class PipelineJobOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status of", "under Video Analyzer account. 
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation =", "current status of the Key Vault mapping. :vartype status: str", "Possible values include: \"Pad\", \"PreserveAspectRatio\", \"Stretch\". :type mode: str or", "Video Analyzer player widget. Alternatively, this URL can be used", "encoding preset. Possible values include: \"SingleLayer_540p_H264_AAC\", \"SingleLayer_720p_H264_AAC\", \"SingleLayer_1080p_H264_AAC\", \"SingleLayer_2160p_H264_AAC\". :type", "_validation = { 'type': {'required': True}, } _attribute_map = {", "True}, 'y': {'required': True}, } _attribute_map = { 'type': {'key':", "\"Reader\". :type role: str or ~video_analyzer.models.AccessPolicyRole :param authentication: Authentication method", "tier: The SKU tier. Possible values include: \"Standard\". :vartype tier:", "'sourceMdmAccount', 'type': 'str'}, 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'}, 'supported_time_grain_types': {'key':", "self.next_link = kwargs.get('next_link', None) class EdgeModuleProvisioningToken(msrest.serialization.Model): \"\"\"Provisioning token properties. A", "'value': {'key': 'value', 'type': 'str'}, } def __init__( self, **kwargs", "= kwargs.get('validation_options', None) class TlsValidationOptions(msrest.serialization.Model): \"\"\"Options for controlling the validation", "inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param preset: Required.", "kwargs['type'] class EndpointBase(msrest.serialization.Model): \"\"\"Base class for endpoints. You probably want", "'mode': {'key': 'mode', 'type': 'str'}, } def __init__( self, **kwargs", ":type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity: The Key Vault identity. 
:type", "The Azure Video Analyzer IoT edge module must be initialized", "'type': 'str'}, 'medium': {'key': 'medium', 'type': 'str'}, 'large': {'key': 'large',", "1 day) and can vary between 1 day to 10", "None class LivePipelineUpdate(ProxyResource): \"\"\"Live pipeline represents a unique instance of", "These are only allowed for topologies where \"kind\" is set", "of an operation on the live pipeline. Variables are only", "sending a request. :ivar id: The ARM identifier for Private", "def __init__( self, **kwargs ): super(CertificateSource, self).__init__(**kwargs) self.type = None", "True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'location': {'required': True},", "str :param parameters: List of the topology parameter declarations. Parameters", "consumption: Public network access for consumption group. :type consumption: ~video_analyzer.models.GroupLevelAccessControl", "the pipeline. :type description: str :param bitrate_kbps: Maximum bitrate capacity", "content types are suitable for different applications and scenarios. Possible", "Analyzer account. Possible values include: \"Enabled\", \"Disabled\". :type public_network_access: str", "automatically to try and match the quality of the input", "values include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\". :type last_modified_by_type: str or", "sub-classes are: EncoderProcessor. All required parameters must be populated in", "video_name: str :param time_sequences: Required. Describes a sequence of datetime", "= { 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} } def __init__(", "declared in the referenced topology. 
Topology parameters without a default", "'ranges': {'key': 'ranges', 'type': 'str'}, } def __init__( self, **kwargs", "(2-channel stereo audio at a sampling rate of 48 kHz).", "super(StorageAccount, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs.get('identity', None) self.status", "link resources. :param value: Array of private link resources. :type", "True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True},", "'status', 'type': 'str'}, } def __init__( self, **kwargs ): super(AccountEncryption,", "order to send to Azure. :param endpoint_url: The URL of", "the live pipeline. Variables are only populated by the server,", "super(EndpointBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.credentials =", "): super(MetricDimension, self).__init__(**kwargs) self.name = None self.display_name = None self.to_be_exported_for_shoebox", "str :ivar current_key_identifier: The current key used to encrypt Video", "None self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) self.dimensions = None self.enable_regional_mdm_account =", "ISO8601 duration format (i.e. \"PT30S\" equals 30 seconds) and can", "VideoSource. All required parameters must be populated in order to", "'type': 'str'}, } def __init__( self, **kwargs ): super(Sku, self).__init__(**kwargs)", "the resource. Possible values include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\". 
:type", "{'key': 'expirationDate', 'type': 'iso-8601'}, } def __init__( self, **kwargs ):", "allow for data to be stored or exported to other", "**kwargs ): super(SystemData, self).__init__(**kwargs) self.created_by = kwargs.get('created_by', None) self.created_by_type =", "): super(VideoContentUrls, self).__init__(**kwargs) self.download_url = kwargs.get('download_url', None) self.archive_base_url = kwargs.get('archive_base_url',", "{'key': 'canStream', 'type': 'bool'}, 'has_data': {'key': 'hasData', 'type': 'bool'}, 'is_in_use':", "{'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__( self, **kwargs ):", "} def __init__( self, **kwargs ): super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value =", "None) class VideoEncoderBase(msrest.serialization.Model): \"\"\"Base type for all video encoding presets,", "Videos can be ingested from RTSP cameras through live pipelines", "and video archiving is enabled. :type archive_base_url: str :param rtsp_tunnel_url:", "e: str \"\"\" _validation = { 'type': {'required': True}, 'kid':", ":type name: str or ~video_analyzer.models.SkuName :ivar tier: The SKU tier.", "# -------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError import msrest.serialization class Resource(msrest.serialization.Model):", "and the RTP packages are interleaved in the HTTP connections", "type: str :param bitrate_kbps: Bitrate, in kilobits per second or", "self, **kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type:", "class for topology source nodes. 
You probably want to use", "# type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.frame_rate = kwargs.get('frame_rate',", "\"\"\"Describes an input signal to be used on a pipeline", "'system_data': {'readonly': True}, 'location': {'required': True}, } _attribute_map = {", "'type': {'required': True}, } _attribute_map = { 'name': {'key': 'name',", "{'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'video_name':", "display_name: The metric display name. :vartype display_name: str :ivar display_description:", "part of the credentials. :type username: str :param password: Required.", "audio. :type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes a custom preset", "__init__( self, **kwargs ): super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind = kwargs.get('kind', None)", "value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A link to the next page", "'str'}, 'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'}, 'preview_image_urls': {'key': 'previewImageUrls', 'type':", "Required. Credentials to be presented to the endpoint. :type credentials:", "'startTime', 'type': 'str'}, 'end_time': {'key': 'endTime', 'type': 'str'}, 'status': {'key':", "True}, } _attribute_map = { 'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},", "None) self.network_access_control = kwargs.get('network_access_control', None) self.provisioning_state = None self.private_endpoint_connections =", "'VideoScale'}, } def __init__( self, **kwargs ): super(VideoEncoderH264, self).__init__(**kwargs) self.type", "self.expiration_date = None self.token = None class EncoderPresetBase(msrest.serialization.Model): \"\"\"Base type", "connect to RTSP cameras and/or generic RTSP servers. :type endpoint:", "**kwargs ): super(Sku, self).__init__(**kwargs) self.name = kwargs['name'] self.tier = None", "audiences: List of expected token audiences. 
Token audience is valid", "kwargs['name'] self.type = kwargs['type'] self.description = kwargs.get('description', None) self.default =", "{'key': 'properties.requiredZoneNames', 'type': '[str]'}, } def __init__( self, **kwargs ):", "_attribute_map = { 'value': {'key': 'value', 'type': '[VideoEntity]'}, 'next_link': {'key':", "{'key': 'audiences', 'type': '[str]'}, 'claims': {'key': 'claims', 'type': '[TokenClaim]'}, 'keys':", "str :ivar type_properties_type: Video content type. Different content types are", "= None self.required_members = None self.required_zone_names = kwargs.get('required_zone_names', None) class", "based on the current video state. All required parameters must", "'type': 'str'}, } def __init__( self, **kwargs ): super(UserAssignedManagedIdentity, self).__init__(**kwargs)", "'str'}, } def __init__( self, **kwargs ): super(VideoArchival, self).__init__(**kwargs) self.retention_period", "= { 'value': {'key': 'value', 'type': '[Operation]'}, } def __init__(", "the Account Key. Possible values include: \"SystemKey\", \"CustomerKey\". :type type:", "'str'}, 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, } def __init__( self,", "segment length indicates the length of individual video files (segments)", "password: str \"\"\" _validation = { 'type': {'required': True}, 'username':", "API requests. :type password: str \"\"\" _validation = { 'type':", "value of the claim to be present on the token.", "rules, and control access to specific video resources. Variables are", ":type value: list[~video_analyzer.models.PrivateLinkResource] \"\"\" _attribute_map = { 'value': {'key': 'value',", "'str'}, 'iot_hub_name': {'key': 'iotHubName', 'type': 'str'}, 'device_id': {'key': 'deviceId', 'type':", "to connect to. 
This contains the required information for Video", "'type': 'bool'}, 'has_data': {'key': 'hasData', 'type': 'bool'}, 'is_in_use': {'key': 'isInUse',", "identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the Iot", "request. :ivar id: The ARM identifier for Private Endpoint. :vartype", "present on the JWT token header. :type kid: str :param", "VideoSequenceAbsoluteTimeMarkers. All required parameters must be populated in order to", "must be specified in ISO8601 duration format (i.e. \"PT30S\" equals", "kwargs.get('actions_required', None) class Properties(msrest.serialization.Model): \"\"\"Metric properties. Variables are only populated", "same IoT edge module in case the module state lost", "**kwargs ): super(NetworkAccessControl, self).__init__(**kwargs) self.integration = kwargs.get('integration', None) self.ingestion =", "in order to send to Azure. :param name: Required. Name", "'expirationDate', 'type': 'iso-8601'}, 'token': {'key': 'token', 'type': 'str'}, } def", "JWT token header. :type kid: str :param alg: Required. RSA", "None) self.ingestion = kwargs.get('ingestion', None) self.consumption = kwargs.get('consumption', None) class", "{'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, 'frame_rate':", "int :ivar state: Current state of the pipeline (read-only). Possible", ":param credentials: Required. Credentials to be presented to the endpoint.", "{'required': True}, 'identity': {'required': True}, 'status': {'readonly': True}, } _attribute_map", "{'key': 'value', 'type': '[PipelineTopology]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, }", "to Azure. :param id: Required. The ID of the storage", "packages are interleaved in the HTTP connections alongside the RTSP", "'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'}, 'state': {'key': 'properties.state', 'type':", "content older than 30 days will be periodically deleted. 
This", "the pipeline can connect to over clear transport (no encryption", "'description', 'type': 'str'}, 'actions_required': {'key': 'actionsRequired', 'type': 'str'}, } def", "'type': {'required': True}, 'status': {'readonly': True}, } _attribute_map = {", "has been Approved/Rejected/Removed by the owner of the service. Possible", "sending a request. :param tags: A set of tags. Resource", "of the pipeline topology. It is recommended that the expected", "x: Required. X coordinate. :type x: str :param y: Required.", ":type consumption: ~video_analyzer.models.GroupLevelAccessControl \"\"\" _attribute_map = { 'integration': {'key': 'integration',", "self, **kwargs ): super(MetricSpecification, self).__init__(**kwargs) self.name = None self.display_name =", "Video Analyzer will use to access the storage account. :type", "on the JWT token header. :type kid: str \"\"\" _validation", "'{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'identity': {'key': 'identity', 'type':", "content authorization token to expose a WebSocket tunneled RTSP stream.", "ID. :vartype principal_id: str \"\"\" _validation = { 'client_id': {'readonly':", "None) self.large = kwargs.get('large', None) class VideoPublishingOptions(msrest.serialization.Model): \"\"\"Optional flags used", "pipelines refer to different values, such as individual cameras' RTSP", "_validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'to_be_exported_for_shoebox':", "self, **kwargs ): super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type:", "the bitrate of the input audio is used. :type bitrate_kbps:", "Known sub-classes are: EccTokenKey, RsaTokenKey. 
All required parameters must be", ":vartype name: str :ivar display_name: The display name for the", "self.id = None class PrivateEndpointConnection(Resource): \"\"\"The Private Endpoint Connection resource.", "output is used as input of the current node. :type", "audiences. Token audience is valid if it matches at least", "base URL. The archived content can be automatically played by", "self.principal_id = None class UsernamePasswordCredentials(CredentialsBase): \"\"\"Username and password credentials. All", "= kwargs.get('identity', None) self.status = None class AudioEncoderBase(msrest.serialization.Model): \"\"\"Base type", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str self.audio_encoder = kwargs.get('audio_encoder',", "~video_analyzer.models.AccessPolicyRole :param authentication: Authentication method to be used when validating", "'KeyVaultProperties'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type':", "video and audio content. :param segment_length: Video segment length indicates", "'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def __init__( self,", "is used only for archiving content. Default is 'false'. If", "allows for video and audio to be captured, optionally archived,", "'client_id': {'key': 'clientId', 'type': 'str'}, 'principal_id': {'key': 'principalId', 'type': 'str'},", "True}, 'kid': {'required': True}, 'alg': {'required': True}, 'n': {'required': True},", "'type': {'readonly': True}, 'info': {'readonly': True}, } _attribute_map = {", "control access to specific video resources. 
Variables are only populated", "= kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps',", "} def __init__( self, **kwargs ): super(VideoScale, self).__init__(**kwargs) self.height =", "{'key': 'kind', 'type': 'str'}, 'sku': {'key': 'sku', 'type': 'Sku'}, 'description':", "'name': {'required': True}, 'video_name': {'required': True}, 'time_sequences': {'required': True}, }", "= { 'disable_archive': {'key': 'disableArchive', 'type': 'str'}, 'disable_rtsp_publishing': {'key': 'disableRtspPublishing',", "containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param role:", "that this value is parameterized as a secret string in", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} }", "Video Analyzer to connect to. This contains the required information", "True}, 'name': {'required': True}, 'endpoint': {'required': True}, } _attribute_map =", "behavior and will be lost if the code is regenerated.", "of archived content. Variables are only populated by the server,", "of the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List of", "through the Azure IoT Edge module twin properties. :vartype token:", "activated, this pipeline job will process content according to the", "endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates:", ":param media_info: Contains information about the video and audio content.", "be returned as part of the resource on API requests.", "): super(EccTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str self.alg", "name: The name of the resource. 
:vartype name: str :ivar", "days), content older than 30 days will be periodically deleted.", "} def __init__( self, **kwargs ): super(RsaTokenKey, self).__init__(**kwargs) self.type =", "class VideoPublishingOptions(msrest.serialization.Model): \"\"\"Optional flags used to change how video is", "url: Required. The endpoint URL for Video Analyzer to connect", "**kwargs ): super(VideoAnalyzer, self).__init__(**kwargs) self.identity = kwargs.get('identity', None) self.storage_accounts =", "and will be ignored when sending a request. :param tags:", "{'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, }", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Topology kind. Possible", "= '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str self.inputs = kwargs['inputs'] class Sku(msrest.serialization.Model):", "with a default value can be optionally be overridden. :type", "as part of the resource on API requests. :type password:", "= kwargs.get('provider', None) self.resource = kwargs.get('resource', None) self.operation = kwargs.get('operation',", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'certificates':", "self.info = None class ErrorDetail(msrest.serialization.Model): \"\"\"The error detail. Variables are", "'#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str class UserAssignedManagedIdentity(msrest.serialization.Model): \"\"\"The details of the", "__init__( self, **kwargs ): super(PipelineJobCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "identities associated to the Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity", "Widget or compatible players. Exported videos can be downloaded as", ":type large: str \"\"\" _attribute_map = { 'small': {'key': 'small',", "self.e = kwargs['e'] class SourceNodeBase(NodeBase): \"\"\"Base class for topology source", "topology. 
:type name: str \"\"\" _validation = { 'type': {'required':", "self, **kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id =", "True}, 'source_mdm_namespace': {'readonly': True}, 'supported_time_grain_types': {'readonly': True}, } _attribute_map =", "to use when establishing the remote tunnel. This string is", "list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List of metric specifications. :vartype metric_specifications: list[~video_analyzer.models.MetricSpecification]", "list[~video_analyzer.models.VideoAnalyzer] \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[VideoAnalyzer]'},", "str or ~video_analyzer.models.VideoType :ivar flags: Video flags contain information about", "on the current video state. All required parameters must be", "_validation = { 'node_name': {'required': True}, } _attribute_map = {", "display_name: The display name for the dimension. :vartype display_name: str", "{ 'type': {'key': '@type', 'type': 'str'}, 'iot_hub_name': {'key': 'iotHubName', 'type':", "super(VideoAnalyzerUpdate, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.identity = kwargs.get('identity', None)", "IoT edge module to be initialized and authorized to the", "= kwargs['user_assigned_identity'] class RsaTokenKey(TokenKey): \"\"\"Required validation properties for tokens generated", "1 day increments. When absent (null), all video content is", "through live pipelines or can be created by exporting sequences", "names. 
:vartype required_members: list[str] :param required_zone_names: The private link resource", "= { 'type': {'key': '@type', 'type': 'str'}, 'ranges': {'key': 'ranges',", "of upstream node references within the topology to be used", "to the video content URL as the value for the", "with the video content authorization token on any compatible DASH", "= '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str self.name = kwargs['name'] class Endpoint(msrest.serialization.Model):", "super(MetricSpecification, self).__init__(**kwargs) self.name = None self.display_name = None self.display_description =", "): super(ProxyResource, self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource): \"\"\"Access policies help define the", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} }", "{'key': 'name', 'type': 'str'}, 'transport': {'key': 'transport', 'type': 'str'}, 'endpoint':", "system_data: ~video_analyzer.models.SystemData :param tags: A set of tags. Resource tags.", "None class UsernamePasswordCredentials(CredentialsBase): \"\"\"Username and password credentials. All required parameters", "retention_period: Video retention period indicates the maximum age of the", "(i.e. \"PT30S\" equals 30 seconds) and can vary between 30", "'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints': {'key':", "to change the resolution from 4K to 1280x720. All required", "when sending a request. :ivar id: The ARM identifier for", "id: str \"\"\" _validation = { 'id': {'readonly': True}, }", "5 minutes, in 30 seconds increments. Changing this value after", "'type': {'required': True}, } _attribute_map = { 'endpoint_url': {'key': 'endpointUrl',", "None class VideoAnalyzerCollection(msrest.serialization.Model): \"\"\"A collection of VideoAnalyzer items. 
:param value:", "{'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel':", "https://vault/keys/mykey). :type key_identifier: str :ivar current_key_identifier: The current key used", "{'required': True}, 'sku': {'required': True}, } _attribute_map = { 'id':", "within the topology. :type name: str :param video_name: Required. Name", "Required. Type of the parameter. Possible values include: \"String\", \"SecretString\",", "self.identity = kwargs['identity'] self.status = None class JwtAuthentication(AuthenticationBase): \"\"\"Properties for", "are: EccTokenKey, RsaTokenKey. All required parameters must be populated in", "the pipeline which output is used as input of the", "class PemCertificateList(CertificateSource): \"\"\"A list of PEM formatted certificates. All required", "\"\"\"Status of video analyzer operation. All required parameters must be", "The IoT Hub identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The", "super(LivePipeline, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None)", "class UserAssignedManagedIdentity(msrest.serialization.Model): \"\"\"The details of the user assigned managed identity", "endpoint URL for Video Analyzer to connect to. :type url:", "as a string. You probably want to use the sub-classes", "str :param type: Required. Type of the parameter. Possible values", "_validation = { 'id': {'required': True}, 'identity': {'required': True}, 'status':", "archive streaming base URL. The archived content can be automatically", "str :ivar display_name: The display name for the dimension. :vartype", "'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'kind': {'key': 'kind', 'type':", "storage account. 
:type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status", "~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The reason for approval/rejection of the connection.", "'str'}, 'description': {'key': 'description', 'type': 'str'}, } def __init__( self,", "'UsernamePasswordCredentials'} } def __init__( self, **kwargs ): super(CredentialsBase, self).__init__(**kwargs) self.type", ":type value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A link to the next", "super(VideoSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str self.video_name =", "'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self,", "super(TokenClaim, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs['value'] class TrackedResource(Resource):", "coordinate. :type y: str \"\"\" _validation = { 'type': {'required':", "values and can later be defined in individual instances of", "required member names. :vartype required_members: list[str] :param required_zone_names: The private", "to other destinations. Variables are only populated by the server,", "'bool'}, 'is_in_use': {'key': 'isInUse', 'type': 'bool'}, } def __init__( self,", "'url': {'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},", "iot_hub_name: str :param device_id: Required. The IoT device id to", "maximum bitrate, in kilobits per second or Kbps, at which", "private_endpoint: The resource of private end point. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint", "data recorded or uploaded into the video. Newly created videos", "kwargs.get('segment_length', None) class VideoPreviewImageUrls(msrest.serialization.Model): \"\"\"Video preview image URLs. 
These URLs", "self, **kwargs ): super(EncoderPresetBase, self).__init__(**kwargs) self.type = None # type:", "} _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'info':", "\"\"\"The Private Endpoint Connection resource. Variables are only populated by", "24 hours. :type retention_period: str \"\"\" _attribute_map = { 'retention_period':", "segment_length: str \"\"\" _attribute_map = { 'segment_length': {'key': 'segmentLength', 'type':", "module is able to periodically connect to the cloud. A", "information about the state of the connection between service consumer", "def __init__( self, **kwargs ): super(TokenKey, self).__init__(**kwargs) self.type = None", "info. Variables are only populated by the server, and will", "pipeline topology parameter. See pipeline topology parameters for more information.", "are: VideoSequenceAbsoluteTimeMarkers. All required parameters must be populated in order", "self, **kwargs ): super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.type", "MetricDimension(msrest.serialization.Model): \"\"\"A metric dimension. Variables are only populated by the", "video with the H.264 (AVC) codec. All required parameters must", "self, **kwargs ): super(JwtAuthentication, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type:", "'properties.sinks', 'type': '[SinkNodeBase]'}, } def __init__( self, **kwargs ): super(PipelineTopologyUpdate,", "EccTokenKey(TokenKey): \"\"\"Required validation properties for tokens generated with Elliptical Curve", "'location': {'key': 'location', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},", "the topology. :type name: str \"\"\" _validation = { 'type':", "set to false. :type has_data: bool :param is_in_use: Required. Value", "a maximum of 10 years. For example, if this is", "{'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type':", "to send to Azure. 
:param name: Required. The operation name.", "tags. :type tags: dict[str, str] :param identity: The identities associated", "By default, strict validation is used. :type validation_options: ~video_analyzer.models.TlsValidationOptions \"\"\"", "= { 'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'}, } def __init__(", "'disable_archive': {'key': 'disableArchive', 'type': 'str'}, 'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'},", "# type: str self.name = kwargs['name'] class Endpoint(msrest.serialization.Model): \"\"\"The endpoint", "class VideoSource(SourceNodeBase): \"\"\"Video source allows for content from a Video", "self.message = kwargs.get('message', None) class CredentialsBase(msrest.serialization.Model): \"\"\"Base class for credential", "'issuers': {'key': 'issuers', 'type': '[str]'}, 'audiences': {'key': 'audiences', 'type': '[str]'},", "{'readonly': True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map = { 'id':", "'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'},", "{ 'name': {'required': True}, } _attribute_map = { 'name': {'key':", ":param next_link: A link to the next page of the", "status: str \"\"\" _validation = { 'type': {'required': True}, 'status':", "def __init__( self, **kwargs ): super(LivePipelineCollection, self).__init__(**kwargs) self.value = kwargs.get('value',", "'JwtAuthentication'} } def __init__( self, **kwargs ): super(AuthenticationBase, self).__init__(**kwargs) self.type", "= kwargs.get('retention_period', None) class VideoContentToken(msrest.serialization.Model): \"\"\"\"Video content token grants access", "job operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name':", "nodes such as an RTSP source which allows for content", "content token value to be added to the video content", "content. 
:type media_info: ~video_analyzer.models.VideoMediaInfo :param archival: Video archival properties. :type", "'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, 'origin': {'key': 'origin', 'type':", "type: Optional[str] class TlsEndpoint(EndpointBase): \"\"\"TLS endpoint describes an endpoint that", "str :ivar info: The additional info. :vartype info: any \"\"\"", "ranges should add up to 24 hours or less. Currently,", "tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates: List of trusted certificate authorities when", "type: Optional[str] self.name = kwargs['name'] class ProcessorNodeBase(NodeBase): \"\"\"Base class for", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'issuers':", "for all audio encoder presets, which define the recipe or", "'type': '[str]'}, } def __init__( self, **kwargs ): super(PrivateLinkResource, self).__init__(**kwargs)", "= kwargs.get('created_at', None) self.last_modified_by = kwargs.get('last_modified_by', None) self.last_modified_by_type = kwargs.get('last_modified_by_type',", "used to change the resolution from 4K to 1280x720. All", "The private link resource group id. :vartype group_id: str :ivar", "long as the module is able to periodically connect to", ":ivar principal_id: The principal ID. :vartype principal_id: str \"\"\" _validation", "= kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerUpdate(msrest.serialization.Model): \"\"\"The", "Azure IoT Edge module twin properties. :vartype token: str \"\"\"", "compatible DASH or HLS players by appending the following to", ":vartype client_id: str :ivar principal_id: The principal ID. :vartype principal_id:", "of the parameter. 
Possible values include: \"String\", \"SecretString\", \"Int\", \"Double\",", "): super(NodeInput, self).__init__(**kwargs) self.node_name = kwargs['node_name'] class Operation(msrest.serialization.Model): \"\"\"An operation.", "'str'}, 'frame_rate': {'key': 'frameRate', 'type': 'str'}, 'scale': {'key': 'scale', 'type':", "'value', 'type': '[PipelineJob]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def", "'type': 'bool'}, 'action_type': {'key': 'actionType', 'type': 'str'}, } def __init__(", "of Azure Video analyzer IoT edge module to be initialized", "sending a request. :ivar expiration_date: The content token expiration date", "'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipeline, self).__init__(**kwargs)", "self).__init__(**kwargs) self.value = kwargs.get('value', None) class VideoAnalyzerIdentity(msrest.serialization.Model): \"\"\"The managed identity", "License. See License.txt in the project root for license information.", "Key Vault identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current", ":type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions \"\"\" _validation = { 'type': {'required': True},", "~video_analyzer.models.GroupLevelAccessControl :param consumption: Public network access for consumption group. :type", "= { 'type': {'key': '@type', 'type': 'str'}, 'issuers': {'key': 'issuers',", "derived types.Constant filled by server. :type type: str :param audio_encoder:", "the error for a failed pipeline job. :param code: The", "topology_name: The reference to an existing pipeline topology defined for", "'y': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "self, **kwargs ): super(EncoderCustomPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type:", "seamless key rotation of the token signing key. 
Token signature", "{'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'private_endpoint':", "'#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'} } def __init__( self, **kwargs ): super(SourceNodeBase, self).__init__(**kwargs)", "category name. :vartype name: str :ivar display_name: The diagnostic log", "custom preset for encoding video with the H.264 (AVC) codec.", "be used as the source. :type video_name: str :param time_sequences:", "key used to encrypt the account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param", "to use when accessing a resource. All required parameters must", "'@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset',", "\"\"\"Video preview image URLs. These URLs can be used in", "Endpoint(msrest.serialization.Model): \"\"\"The endpoint details. All required parameters must be populated", "class TunnelBase(msrest.serialization.Model): \"\"\"Base class for tunnel objects. You probably want", "status: str :param error: The error detail. :type error: ~video_analyzer.models.ErrorDetail", "whether or not there has ever been data recorded or", "the use of \"${PARAMETER_NAME}\" string pattern. Parameters can have optional", "'id': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", "} def __init__( self, **kwargs ): super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access =", "_attribute_map = { 'can_stream': {'key': 'canStream', 'type': 'bool'}, 'has_data': {'key':", "endpoint is behind a firewall. 
:type tunnel: ~video_analyzer.models.TunnelBase \"\"\" _validation", "} def __init__( self, **kwargs ): super(OperationCollection, self).__init__(**kwargs) self.value =", "def __init__( self, **kwargs ): super(ProxyResource, self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource): \"\"\"Access", "retry to re-establish connection (with exponential backoff), checking to see", "'type': {'required': True}, 'name': {'required': True}, 'video_name': {'required': True}, 'time_sequences':", "for topology sink nodes. You probably want to use the", "class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): \"\"\"A sequence of absolute datetime ranges as a", "Current state of the pipeline (read-only). Possible values include: \"Processing\",", "kwargs.get('bitrate_kbps', None) self.frame_rate = kwargs.get('frame_rate', None) self.scale = kwargs.get('scale', None)", "list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':", "kwargs.get('error', None) class VideoAnalyzerUpdate(msrest.serialization.Model): \"\"\"The update operation for a Video", "'PemCertificateList'} } def __init__( self, **kwargs ): super(CertificateSource, self).__init__(**kwargs) self.type", "the claim to be present on the token. :type value:", "values include: \"Pending\", \"Approved\", \"Rejected\". :type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus", "True}, 'time_sequences': {'required': True}, } _attribute_map = { 'type': {'key':", "of the claim which must be present on the token.", "role: Defines the access level granted by this policy. 
Possible", "Analyzer to connect to RTSP cameras and/or generic RTSP servers.", "kwargs.get('sku', None) self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters', None)", "self.error = kwargs.get('error', None) class VideoAnalyzerUpdate(msrest.serialization.Model): \"\"\"The update operation for", "super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None)", "type. :type operation: str :param description: The operation description. :type", "achieved and can be reused across many pipeline instances which", "over TLS transport (data is encrypted in transit). All required", "'str'}, 'username': {'key': 'username', 'type': 'str'}, 'password': {'key': 'password', 'type':", "operation display name. :type display: ~video_analyzer.models.OperationDisplay :param origin: Origin of", "the Azure Video Analyzer IoT edge module through the Azure", "'id', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key':", "Low resolution preview image URL. :type small: str :param medium:", "self, **kwargs ): super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type:", "str :param error: The error detail. :type error: ~video_analyzer.models.ErrorDetail \"\"\"", "'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'private_endpoint': {'key': 'properties.privateEndpoint', 'type':", ":type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public network", "'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'state': {'key': 'properties.state',", "time grain types. :vartype supported_time_grain_types: list[str] \"\"\" _validation = {", "the encoded video. If omitted, the encoder uses the resolution", "include: \"Archive\", \"File\". 
:vartype type_properties_type: str or ~video_analyzer.models.VideoType :ivar flags:", ":type tunnel: ~video_analyzer.models.TunnelBase \"\"\" _validation = { 'type': {'required': True},", "= { 'value': {'key': 'value', 'type': '[PipelineTopology]'}, 'next_link': {'key': '@nextLink',", "service_specification: The service specifications. :vartype service_specification: ~video_analyzer.models.ServiceSpecification \"\"\" _validation =", "_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'id': {'key':", ":param required_zone_names: The private link resource Private link DNS zone", "Azure. :param key_identifier: Required. The URL of the Key Vault", "super(VideoEntity, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description = kwargs.get('description', None)", "None) self.description = kwargs.get('description', None) self.type_properties_type = None self.flags =", "information for Video Analyzer to connect to RTSP cameras and/or", "None self.blob_duration = None class MetricDimension(msrest.serialization.Model): \"\"\"A metric dimension. Variables", "= kwargs.get('properties', None) self.is_data_action = kwargs.get('is_data_action', None) self.action_type = kwargs.get('action_type',", "True}, 'display_name': {'readonly': True}, 'blob_duration': {'readonly': True}, } _attribute_map =", "'reason', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def", "a 'location'. Variables are only populated by the server, and", "= '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str self.alg = kwargs['alg'] self.x =", "It will not have tags and a location. Variables are", "str :ivar state: Current state of the pipeline (read-only). Possible", "years, in 1 day increments. 
When absent (null), all video", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def", "'{str}'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type':", "type: str :param username: Required. Username to be presented as", "collection of PipelineJob items. :param value: A collection of PipelineJob", "long. :type title: str :param description: Optional video description provided", "can_stream: bool :param has_data: Required. Value indicating whether or not", "'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'aggregation_type': {'key': 'aggregationType',", ":type username: str :param password: Required. Password to be presented", "authorization token to download the video MP4 file. The resulting", ":vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates whether regional MDM account", "value: A collection of EdgeModuleEntity items. :type value: list[~video_analyzer.models.EdgeModuleEntity] :param", "tags. :type tags: dict[str, str] :param location: Required. The geo-location", "def __init__( self, **kwargs ): super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value = kwargs.get('value',", ":ivar additional_info: The error additional info. :vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] \"\"\"", "{'key': 'sourceMdmAccount', 'type': 'str'}, 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'}, 'supported_time_grain_types':", "an optional property, typically used when the endpoint is behind", "Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts", "True}, 'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly': True}, 'enable_regional_mdm_account': {'readonly': True},", "Operation end time. 
:type end_time: str :param status: Operation status.", "{ 'can_stream': {'key': 'canStream', 'type': 'bool'}, 'has_data': {'key': 'hasData', 'type':", "self, **kwargs ): super(CredentialsBase, self).__init__(**kwargs) self.type = None # type:", "def __init__( self, **kwargs ): super(EncoderSystemPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset'", "{ 'expiration_date': {'required': True}, } _attribute_map = { 'expiration_date': {'key':", "{ 'type': {'required': True}, 'username': {'required': True}, 'password': {'required': True},", "on a pipeline node. All required parameters must be populated", "Required. Y coordinate. :type y: str \"\"\" _validation = {", "state lost or reset. Variables are only populated by the", "self.parameters = kwargs.get('parameters', None) class LogSpecification(msrest.serialization.Model): \"\"\"A diagnostic log emitted", "If used in a batch topology, this allows for video", "'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'group_id': {'key': 'properties.groupId',", "True}, 'content_urls': {'readonly': True}, } _attribute_map = { 'id': {'key':", ":param ingestion: Public network access for ingestion group. :type ingestion:", "super(RtspSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str self.transport =", ":type endpoint: ~video_analyzer.models.EndpointBase \"\"\" _validation = { 'type': {'required': True},", "operation on the pipeline job. Variables are only populated by", "details for the live pipeline operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\"", "account resource. Video Analyzer relies on tables, queues, and blobs.", "for encoding the input content using the encoder processor. All", "the video resource. This property is only allowed for topologies", "enable pipeline data to be analyzed, processed or transformed. :type", "on how audio should be processed. 
You probably want to", "'reason': {'key': 'reason', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'},", "= { 'title': {'key': 'title', 'type': 'str'}, 'description': {'key': 'description',", ":param name: Required. The operation name. :type name: str :param", "= kwargs.get('retention_period', None) class VideoEncoderBase(msrest.serialization.Model): \"\"\"Base type for all video", "} _attribute_map = { 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'}, }", "super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str self.ranges =", "does not specify a value. :type default: str \"\"\" _validation", "3000 Kbps in increments of 100 Kbps. If the RTSP", "'type': 'ServiceSpecification'}, } def __init__( self, **kwargs ): super(Properties, self).__init__(**kwargs)", "or more data sinks which allow for data to be", "'createdByType', 'type': 'str'}, 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, 'last_modified_by': {'key':", "'@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, 'alg': {'key':", "The geo-location where the resource lives. :type location: str \"\"\"", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key':", "the resolution of the input video. :type scale: ~video_analyzer.models.VideoScale \"\"\"", "AccessPolicyEntityCollection(msrest.serialization.Model): \"\"\"A collection of AccessPolicyEntity items. 
:param value: A collection", "def __init__( self, **kwargs ): super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier = kwargs['key_identifier']", "'type': {'key': 'type', 'type': 'str'}, 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'},", "True}, 'inputs': {'required': True}, 'video_name': {'required': True}, } _attribute_map =", "'#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str self.inputs = kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase): \"\"\"Encoder", "'VideoContentUrls'}, 'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'}, 'archival': {'key': 'properties.archival', 'type':", "**kwargs ): super(RtspSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str", "chain trust validation to be skipped. Default is 'false'. :type", "type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) class AudioEncoderAac(AudioEncoderBase): \"\"\"A custom", "kwargs['key_identifier'] self.current_key_identifier = None class ListProvisioningTokenInput(msrest.serialization.Model): \"\"\"The input parameters to", "def __init__( self, **kwargs ): super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value',", "required_zone_names: The private link resource Private link DNS zone name.", "'username': {'key': 'username', 'type': 'str'}, 'password': {'key': 'password', 'type': 'str'},", ":type name: str :param id: Operation resource ID. :type id:", "{ 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type':", "**kwargs ): super(TimeSequenceBase, self).__init__(**kwargs) self.type = None # type: Optional[str]", "use when accessing a resource. All required parameters must be", "used when they are not defined in the pipelines. All", ":ivar details: The error details. :vartype details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info:", "to true, then no content is archived. 
:type video_name: str", "a request. :ivar name: The diagnostic log category name. :vartype", "None) self.consumption = kwargs.get('consumption', None) class NodeInput(msrest.serialization.Model): \"\"\"Describes an input", "Optional title provided by the user. Value can be up", "credentials. Overall a topology is composed of the following: *", "is specific to a single video. :vartype token: str \"\"\"", "value: Array of private endpoint connections. :type value: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\"", "'publicNetworkAccess', 'type': 'str'}, } def __init__( self, **kwargs ): super(GroupLevelAccessControl,", "values include: \"ClientApi\". :type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType \"\"\" _validation", "= None self.required_zone_names = kwargs.get('required_zone_names', None) class PrivateLinkResourceListResult(msrest.serialization.Model): \"\"\"A list", "kwargs['value'] class TrackedResource(Resource): \"\"\"The resource model definition for an Azure", "kwargs.get('role', None) self.authentication = kwargs.get('authentication', None) class AccessPolicyEntityCollection(msrest.serialization.Model): \"\"\"A collection", "need be provided. Possible values include: \"Pad\", \"PreserveAspectRatio\", \"Stretch\". :type", "self.parameters = kwargs.get('parameters', None) class PipelineTopology(ProxyResource): \"\"\"Pipeline topology describes the", "stream. It is available when the video type is 'archive'", "} def __init__( self, **kwargs ): super(EncoderSystemPreset, self).__init__(**kwargs) self.type =", "of private end point. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A", "Describes a custom preset for encoding audio. :type audio_encoder: ~video_analyzer.models.AudioEncoderBase", "will use to access the storage account. :type identity: ~video_analyzer.models.ResourceIdentity", "nodes. 
Sink nodes allow pipeline data to be stored or", "user assigned managed identity's resource identifier to use when accessing", "'OperationDisplay'}, 'origin': {'key': 'origin', 'type': 'str'}, 'properties': {'key': 'properties', 'type':", "kwargs['password'] class VideoAnalyzer(TrackedResource): \"\"\"The Video Analyzer account. Variables are only", "'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__( self, **kwargs", "'str'}, } def __init__( self, **kwargs ): super(VideoContentToken, self).__init__(**kwargs) self.expiration_date", "self.client_id = None self.principal_id = None class UsernamePasswordCredentials(CredentialsBase): \"\"\"Username and", "self, **kwargs ): super(NodeInput, self).__init__(**kwargs) self.node_name = kwargs['node_name'] class Operation(msrest.serialization.Model):", "True}, } _attribute_map = { 'code': {'key': 'code', 'type': 'str'},", "encoder uses the resolution of the input video. :type scale:", "True}, 'x': {'required': True}, 'y': {'required': True}, } _attribute_map =", "'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'}, 'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'},", "Required. The URL of the Key Vault key used to", "Managed Identities. :type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] \"\"\" _validation = {", "list[str] \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':", "kwargs.get('small', None) self.medium = kwargs.get('medium', None) self.large = kwargs.get('large', None)", "This is an optional property, typically used when the endpoint", "def __init__( self, **kwargs ): super(AudioEncoderBase, self).__init__(**kwargs) self.type = None", "provisioning state of the private endpoint connection resource. 
Possible values", "'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(PipelineJobOperationStatus, self).__init__(**kwargs)", "None) self.error = kwargs.get('error', None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): \"\"\"Status of private", "'type': '[str]'}, } def __init__( self, **kwargs ): super(PemCertificateList, self).__init__(**kwargs)", "why the given name is not available. Possible values include:", "self, **kwargs ): super(EncoderProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type:", ":type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A collection of information about", "{'required': True}, 'inputs': {'required': True}, 'video_name': {'required': True}, } _attribute_map", "endpoints. You probably want to use the sub-classes and not", "= kwargs.get('error', None) class GroupLevelAccessControl(msrest.serialization.Model): \"\"\"Group level network access control.", "'type': 'str'}, 'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'}, } def __init__(", "{'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'blob_duration':", "Details about the error, in case the pipeline job fails.", "network access for ingestion group. :type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption:", "on any standard media player. It is available when the", "pattern. Parameters can have optional default values and can later", "list[str] :param required_zone_names: The private link resource Private link DNS", "can and must be referenced throughout the topology and can", "list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The error additional info. 
:vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo]", ":param status: Indicates whether the connection has been Approved/Rejected/Removed by", "~video_analyzer.models.VideoScaleMode \"\"\" _attribute_map = { 'height': {'key': 'height', 'type': 'str'},", "the video type is 'archive' and preview images are enabled.", "the length of individual video files (segments) which are persisted", "'display_description': {'key': 'displayDescription', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'},", "credentials. :type username: str :param password: Required. Password to be", "audio is used. :type bitrate_kbps: str \"\"\" _validation = {", "__init__( self, **kwargs ): super(LivePipeline, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None)", "str :ivar unit: The metric unit. Possible values include: \"Bytes\",", "} def __init__( self, **kwargs ): super(VideoPublishingOptions, self).__init__(**kwargs) self.disable_archive =", "with the specified storage account. :param value: Array of private", "A collection of VideoAnalyzer items. :type value: list[~video_analyzer.models.VideoAnalyzer] \"\"\" _attribute_map", "): super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type = kwargs['type'] self.user_assigned_identities = kwargs.get('user_assigned_identities', None)", "sub-classes and not this class directly. Known sub-classes are: VideoSequenceAbsoluteTimeMarkers.", "\"\"\"A collection of EdgeModuleEntity items. :param value: A collection of", "when sending a request. :ivar log_specifications: List of log specifications.", "on which the operation is performed. :type resource: str :param", "**kwargs ): super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access = kwargs.get('public_network_access', None) class IotHub(msrest.serialization.Model):", "type. Possible values include: \"Internal\". 
:type action_type: str or ~video_analyzer.models.ActionType", "kwargs.get('parameters', None) class PipelineJobCollection(msrest.serialization.Model): \"\"\"A collection of PipelineJob items. :param", "'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__( self,", "= { 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__( self, **kwargs", "or RS512. Possible values include: \"RS256\", \"RS384\", \"RS512\". :type alg:", "'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } _subtype_map =", "class PipelineTopology(ProxyResource): \"\"\"Pipeline topology describes the processing steps to be", "{'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type':", "according to the scenario to be achieved and can be", "'type': '[VideoEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__(", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'username': {'key':", "selected portions of archived content. Variables are only populated by", "\"\"\"A collection of PipelineJob items. :param value: A collection of", "'true' content will not be archived or recorded. This is", "_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'value': {'key':", "'display': {'key': 'display', 'type': 'OperationDisplay'}, 'origin': {'key': 'origin', 'type': 'str'},", "{'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} } def __init__( self, **kwargs ): super(TunnelBase, self).__init__(**kwargs)", "period will be effective within 24 hours. :type retention_period: str", "for the dimension. :vartype display_name: str :ivar to_be_exported_for_shoebox: Whether to", "claims: List of additional token claims to be validated. Token", "Name of the IoT Hub. :type iot_hub_name: str :param device_id:", "{ 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} } def __init__( self, **kwargs ):", "name: Required. 
Name of the built-in encoding preset. Possible values", "scale: Describes the resolution of the encoded video. If omitted,", "videos can be streamed. :type can_stream: bool :param has_data: Required.", "include: \"Invalid\", \"AlreadyExists\". :type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason :param message:", "server. :type type: str :param name: Required. Name of the", "The ID of the storage account resource. Video Analyzer relies", "): super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier = kwargs['key_identifier'] self.current_key_identifier = None class", "to be used as the source. :type video_name: str :param", "video source only picks up recorded media within these ranges.", "self.certificates = kwargs['certificates'] class PipelineJob(ProxyResource): \"\"\"Pipeline job represents a unique", ":param x: Required. X coordinate. :type x: str :param y:", "name for the dimension. :vartype display_name: str :ivar to_be_exported_for_shoebox: Whether", "self, **kwargs ): super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class", "'type': 'str'}, 'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'}, 'video_encoder': {'key': 'videoEncoder',", "list[~video_analyzer.models.PipelineTopology] :param next_link: A link to the next page of", "or existing video resource used to capture and publish content.", "'[PrivateLinkResource]'}, } def __init__( self, **kwargs ): super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value", "Azure. :param expiration_date: Required. The desired expiration date of the", "content is retained indefinitely. This property is only allowed for", "standard media player. It is available when the video type", "parameter declaration. Declared parameters can and must be referenced throughout", "bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps,", ":ivar dimensions: The metric dimensions. 
:vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account:", "The IoT device id to use when establishing the remote", "alg: str or ~video_analyzer.models.AccessPolicyEccAlgo :param x: Required. X coordinate. :type", "{ 'type': {'required': True}, 'iot_hub_name': {'required': True}, 'device_id': {'required': True},", "self, **kwargs ): super(TokenClaim, self).__init__(**kwargs) self.name = kwargs['name'] self.value =", "source_mdm_account: str :ivar source_mdm_namespace: The source MDM namespace. :vartype source_mdm_namespace:", "provisioning_state: Provisioning state of the Video Analyzer account. Possible values", "only for low latency video streaming. Default is 'false'. If", "preset, which defines the recipe or instructions on how the", "or ~video_analyzer.models.EncoderSystemPresetType \"\"\" _validation = { 'type': {'required': True}, 'name':", "A set of tags. Resource tags. :type tags: dict[str, str]", "preset for encoding the input content using the encoder processor.", "'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } def __init__( self, **kwargs", "self).__init__(**kwargs) self.type = None # type: Optional[str] class EncoderCustomPreset(EncoderPresetBase): \"\"\"Describes", "will be ignored when sending a request. :ivar name: The", "or HLS players by appending the following to the base", "characters long. :type description: str :param segment_length: Segment length indicates", "{ 'type': {'key': '@type', 'type': 'str'}, 'issuers': {'key': 'issuers', 'type':", "str self.issuers = kwargs.get('issuers', None) self.audiences = kwargs.get('audiences', None) self.claims", "\"\"\"A sequence of absolute datetime ranges as a string. The", "parameters values for parameters which have been declared in the", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[VideoAnalyzer]'}, }", "use of \"${PARAMETER_NAME}\" string pattern. 
Parameters can have optional default", "error: Details about the error, in case the pipeline job", "the connection between service consumer and provider. :type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState", "= { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'to_be_exported_for_shoebox': {'readonly':", "'isDataAction', 'type': 'bool'}, 'action_type': {'key': 'actionType', 'type': 'str'}, } def", "the token signing key. Token signature must match exactly one", "): super(Sku, self).__init__(**kwargs) self.name = kwargs['name'] self.tier = None class", "= '#Microsoft.VideoAnalyzer.VideoSource' # type: str self.video_name = kwargs['video_name'] self.time_sequences =", "to be stored or exported. :type sinks: list[~video_analyzer.models.SinkNodeBase] \"\"\" _validation", "~video_analyzer.models.SkuName :ivar tier: The SKU tier. Possible values include: \"Standard\".", "in order to send to Azure. :param id: Required. The", "ignored when sending a request. :ivar service_specification: The service specifications.", "'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, 'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'},", "duration format in the granularity of days, up to a", "TCP, the RTP packets are interleaved on the TCP RTSP", "can connect to the endpoint URL. This is an optional", "video file is available for consumption. :type download_url: str :param", "__init__( self, **kwargs ): super(LivePipelineCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "token issuers. 
Token issuer is valid if it matches at", "= None self.status = None self.error = None class LivePipelineUpdate(ProxyResource):", "# type: Optional[str] self.kid = kwargs['kid'] class EccTokenKey(TokenKey): \"\"\"Required validation", "'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs", "{ 'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'}, } def __init__( self,", "cloud account. The provisioning token itself is short lived and", "input content should be processed. You probably want to use", "_attribute_map = { 'value': {'key': 'value', 'type': '[PipelineTopology]'}, 'next_link': {'key':", "of type 'archive'. If used in a batch topology, this", "topology defined for real-time content processing. When activated, this live", "{ 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type':", "{'readonly': True}, 'location': {'required': True}, } _attribute_map = { 'id':", "class PipelineJobError(msrest.serialization.Model): \"\"\"Details about the error for a failed pipeline", "'[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name", "it can used to change the resolution from 4K to", "the following: * Parameters: list of user defined parameters that", "conjunction with the video content authorization token on any compatible", "} def __init__( self, **kwargs ): super(VideoContentToken, self).__init__(**kwargs) self.expiration_date =", "{ 'id': {'required': True}, 'status': {'readonly': True}, } _attribute_map =", "__init__( self, **kwargs ): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status = kwargs.get('status', None)", "class for tunnel objects. You probably want to use the", "2048 characters long. :type description: str :ivar type_properties_type: Video content", "order to send to Azure. :param node_name: Required. 
The name", "super(VideoMediaInfo, self).__init__(**kwargs) self.segment_length = kwargs.get('segment_length', None) class VideoPreviewImageUrls(msrest.serialization.Model): \"\"\"Video preview", ":vartype info: any \"\"\" _validation = { 'type': {'readonly': True},", "{ 'node_name': {'required': True}, } _attribute_map = { 'node_name': {'key':", "True}, } _attribute_map = { 'can_stream': {'key': 'canStream', 'type': 'bool'},", "validation_options: ~video_analyzer.models.TlsValidationOptions \"\"\" _validation = { 'type': {'required': True}, 'credentials':", "'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'},", "- HLSv4: /manifest(format=m3u8-aapl).m3u8 - HLS CMAF: /manifest(format=m3u8-cmaf) - DASH CMAF:", "): super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date = None self.token = None class", "{'readonly': True}, 'location': {'required': True}, 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly':", "self).__init__(**kwargs) self.segment_length = kwargs.get('segment_length', None) class VideoPreviewImageUrls(msrest.serialization.Model): \"\"\"Video preview image", "\"\"\"Properties for access validation based on JSON Web Tokens (JWT).", "all audio encoder presets, which define the recipe or instructions", "super(Endpoint, self).__init__(**kwargs) self.endpoint_url = kwargs.get('endpoint_url', None) self.type = kwargs['type'] class", "response for all Azure Resource Manager resources. 
Variables are only", "'type': {'key': '@type', 'type': 'str'}, 'issuers': {'key': 'issuers', 'type': '[str]'},", "} def __init__( self, **kwargs ): super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value =", "topology which captures content from a RTSP camera and archives", "'value': {'key': 'value', 'type': '[LivePipeline]'}, 'next_link': {'key': '@nextLink', 'type': 'str'},", "None) class AccessPolicyEntityCollection(msrest.serialization.Model): \"\"\"A collection of AccessPolicyEntity items. :param value:", ":param location: Required. The geo-location where the resource lives. :type", "Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether", "include: \"Reader\". :type role: str or ~video_analyzer.models.AccessPolicyRole :param authentication: Authentication", "kwargs.get('properties', None) self.is_data_action = kwargs.get('is_data_action', None) self.action_type = kwargs.get('action_type', None)", "resource. All required parameters must be populated in order to", "None self.parameters = kwargs.get('parameters', None) class PipelineTopology(ProxyResource): \"\"\"Pipeline topology describes", "the user-defined topology parameters. A pipeline can only define or", "only used for the initial handshake between IoT edge module", "self, **kwargs ): super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity = kwargs['user_assigned_identity'] class RsaTokenKey(TokenKey):", "'expiration_date': {'readonly': True}, 'token': {'readonly': True}, } _attribute_map = {", "request body. :param name: The name of the resource for", "{'key': 'publicNetworkAccess', 'type': 'str'}, } def __init__( self, **kwargs ):", "'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__( self, **kwargs ): super(CredentialsBase,", "where \"kind\" is set to \"live\". 
:type retention_period: str \"\"\"", "network access is allowed for specified resources under the Video", "bool :param is_in_use: Required. Value indicating whether or not the", "= None self.unit = None self.aggregation_type = None self.lock_aggregation_type =", "{ 'title': {'key': 'title', 'type': 'str'}, 'description': {'key': 'description', 'type':", ":vartype expiration_date: ~datetime.datetime :ivar token: The token blob to be", "= None self.error = None self.parameters = kwargs.get('parameters', None) class", "): super(ErrorResponse, self).__init__(**kwargs) self.error = kwargs.get('error', None) class GroupLevelAccessControl(msrest.serialization.Model): \"\"\"Group", "type: str :ivar system_data: Azure Resource Manager metadata containing createdBy", "a key without a version (for example https://vault/keys/mykey). :type key_identifier:", ":ivar flags: Video flags contain information about the available video", "self.tags = kwargs.get('tags', None) self.identity = kwargs.get('identity', None) self.storage_accounts =", "segment_length: Video segment length indicates the length of individual video", "increments. :type segment_length: str \"\"\" _attribute_map = { 'segment_length': {'key':", "the connection between service consumer and provider. :param status: Indicates", "_validation = { 'client_id': {'readonly': True}, 'principal_id': {'readonly': True}, }", "node references within the topology to be used as inputs", "self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str self.video_name = kwargs['video_name'] self.time_sequences", "None) class OperationCollection(msrest.serialization.Model): \"\"\"A collection of Operation items. :param value:", "account is enabled. :vartype enable_regional_mdm_account: bool :ivar source_mdm_account: The source", "{'key': 'description', 'type': 'str'}, } def __init__( self, **kwargs ):", "\"\"\"The Private Endpoint resource. Variables are only populated by the", "share the same processing characteristics. 
For instance, a pipeline topology", "the encoder uses the resolution of the input video. :type", "preset for encoding audio. :type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes", "~video_analyzer.models.VideoPreviewImageUrls \"\"\" _attribute_map = { 'download_url': {'key': 'downloadUrl', 'type': 'str'},", "self.network_access_control = kwargs.get('network_access_control', None) self.provisioning_state = None self.private_endpoint_connections = None", "duration format (i.e. \"P1D\" equals 1 day) and can vary", "{ 'type': {'readonly': True}, 'info': {'readonly': True}, } _attribute_map =", "specifications. :vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List of metric specifications.", "(R) AutoRest Code Generator. # Changes may cause incorrect behavior", "list designates that Azure Video Analyzer's list of trusted authorities", "\"\"\"Base class for credential objects. You probably want to use", "Video Analyzer account. Variables are only populated by the server,", "collection of LivePipeline items. :type value: list[~video_analyzer.models.LivePipeline] :param next_link: A", "is regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError import msrest.serialization", "send to Azure. :param endpoint_url: The URL of the endpoint.", "follows the OData error response format.). :param error: The error", "'ErrorDetail'}, } def __init__( self, **kwargs ): super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name", "{'key': 'status', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'actions_required':", "class VideoAnalyzerUpdate(msrest.serialization.Model): \"\"\"The update operation for a Video Analyzer account.", "'type': 'str'}, 'y': {'key': 'y', 'type': 'str'}, } def __init__(", "certificate sources. 
You probably want to use the sub-classes and", "{'key': 'name', 'type': 'str'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'time_sequences':", "{'key': 'properties.endpoints', 'type': '[Endpoint]'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs':", "**kwargs ): super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type = kwargs['type'] self.user_assigned_identities = kwargs.get('user_assigned_identities',", "'blob_duration': {'key': 'blobDuration', 'type': 'str'}, } def __init__( self, **kwargs", "'inputs': {'required': True}, 'preset': {'required': True}, } _attribute_map = {", "} def __init__( self, **kwargs ): super(AudioEncoderAac, self).__init__(**kwargs) self.type =", "send to Azure. :param expiration_date: Required. The desired expiration date", "self.created_by = kwargs.get('created_by', None) self.created_by_type = kwargs.get('created_by_type', None) self.created_at =", "type: str self.audio_encoder = kwargs.get('audio_encoder', None) self.video_encoder = kwargs.get('video_encoder', None)", "= kwargs.get('parameters', None) self.sources = kwargs.get('sources', None) self.processors = kwargs.get('processors',", "'@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, 'frame_rate': {'key':", "Player Widget or compatible players. Exported videos can be downloaded", "self.token = None class EncoderPresetBase(msrest.serialization.Model): \"\"\"Base type for all encoder", "'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } _subtype_map = {", "'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(LivePipelineOperationStatus,", "'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error',", "Operation resource ID. 
:type id: str :param start_time: Operation start", "def __init__( self, **kwargs ): super(EndpointBase, self).__init__(**kwargs) self.type = None", "self).__init__(**kwargs) self.integration = kwargs.get('integration', None) self.ingestion = kwargs.get('ingestion', None) self.consumption", "video resources. Variables are only populated by the server, and", "sending a request. :ivar name: The name of the live", "to 'false'. :type disable_rtsp_publishing: str \"\"\" _attribute_map = { 'disable_archive':", "\"\"\"An operation. All required parameters must be populated in order", "'type': 'VideoContentUrls'}, 'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'}, 'archival': {'key': 'properties.archival',", "built-in encoding preset. Possible values include: \"SingleLayer_540p_H264_AAC\", \"SingleLayer_720p_H264_AAC\", \"SingleLayer_1080p_H264_AAC\", \"SingleLayer_2160p_H264_AAC\".", "resource required member names. :vartype required_members: list[str] :param required_zone_names: The", "a request. All required parameters must be populated in order", "class UsernamePasswordCredentials(CredentialsBase): \"\"\"Username and password credentials. All required parameters must", "Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.", "str :param actions_required: A message indicating if changes on the", "token key id. 
Validation keys are looked up based on", ":type value: list[~video_analyzer.models.PipelineJob] :param next_link: A link to the next", "{'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'group_id':", "self.next_link = kwargs.get('next_link', None) class AccountEncryption(msrest.serialization.Model): \"\"\"Defines how the Video", "'node_name': {'required': True}, } _attribute_map = { 'node_name': {'key': 'nodeName',", "'#Microsoft.VideoAnalyzer.VideoSource' # type: str self.video_name = kwargs['video_name'] self.time_sequences = kwargs['time_sequences']", "operation: The operation type. :type operation: str :param description: The", "'[SinkNodeBase]'}, } def __init__( self, **kwargs ): super(PipelineTopology, self).__init__(**kwargs) self.kind", "'location': {'required': True}, 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections':", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param title: Optional", "'@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineJobCollection,", "data to be stored or exported to other destinations. Variables", "kwargs.get('private_link_service_connection_state', None) self.provisioning_state = None class PrivateEndpointConnectionListResult(msrest.serialization.Model): \"\"\"List of private", "pipeline which output is used as input of the current", "Kbps, at which video should be encoded. If omitted, encoder", "'str'}, 'sku': {'key': 'sku', 'type': 'Sku'}, 'description': {'key': 'properties.description', 'type':", "analyzer operation. All required parameters must be populated in order", "class OperationDisplay(msrest.serialization.Model): \"\"\"Operation details. :param provider: The service provider. 
:type", "frame_rate: str :param scale: Describes the resolution of the encoded", "str \"\"\" _validation = { 'type': {'required': True}, 'name': {'required':", "'currentKeyIdentifier', 'type': 'str'}, } def __init__( self, **kwargs ): super(KeyVaultProperties,", "str :param description: Optional video description provided by the user.", "processing steps to be applied when processing content for a", "self.provisioning_state = None class PrivateEndpointConnectionListResult(msrest.serialization.Model): \"\"\"List of private endpoint connection", "{ 'name': {'key': 'name', 'type': 'str'}, 'id': {'key': 'id', 'type':", "**kwargs ): super(VideoMediaInfo, self).__init__(**kwargs) self.segment_length = kwargs.get('segment_length', None) class VideoPreviewImageUrls(msrest.serialization.Model):", "def __init__( self, **kwargs ): super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name = None", "'str'}, 'password': {'key': 'password', 'type': 'str'}, } def __init__( self,", "is parameterized as a secret string in order to prevent", "'key_identifier': {'key': 'keyIdentifier', 'type': 'str'}, 'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'},", "identifier to use when accessing a resource. :type user_assigned_identity: str", "'[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'}, 'encryption': {'key': 'properties.encryption', 'type':", "is composed of the following: * Parameters: list of user", "'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'x': {'key': 'x', 'type':", "super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.type = kwargs.get('type', None)", "now below the reserved capacity. Doing so will ensure that", "str :param end_time: Operation end time. :type end_time: str :param", "= kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase): \"\"\"Encoder processor allows for encoding of", "should add up to 24 hours or less. 
Currently, there", "class VideoAnalyzerCollection(msrest.serialization.Model): \"\"\"A collection of VideoAnalyzer items. :param value: A", "{ 'name': {'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type':", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param role: Defines the access", "self.value = kwargs.get('value', None) class PrivateLinkServiceConnectionState(msrest.serialization.Model): \"\"\"A collection of information", "'type': 'str'}, 'iot_hub_name': {'key': 'iotHubName', 'type': 'str'}, 'device_id': {'key': 'deviceId',", "available for consumption. :type download_url: str :param archive_base_url: Video archive", "def __init__( self, **kwargs ): super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name',", "state of the private endpoint connection resource. Possible values include:", "'@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'video_name': {'key':", "part of the resource on API requests. :type password: str", "self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints = None self.encryption = kwargs.get('encryption',", "CheckNameAvailabilityResponse(msrest.serialization.Model): \"\"\"The check availability result. :param name_available: Indicates if the", "creation and last modification of the resource. :param created_by: The", "{'key': 'properties.description', 'type': 'str'}, 'type_properties_type': {'key': 'properties.type', 'type': 'str'}, 'flags':", "class directly. Known sub-classes are: VideoSink. All required parameters must", "required parameters must be populated in order to send to", "archived. :type video_name: str :param video_creation_properties: Optional video properties to", "the upstream node in the pipeline which output is used", "applied when processing content for a particular outcome. 
The topology", "= kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class LivePipelineOperationStatus(msrest.serialization.Model): \"\"\"Used", "'validationOptions', 'type': 'TlsValidationOptions'}, } def __init__( self, **kwargs ): super(TlsEndpoint,", "of tags. Resource tags. :type tags: dict[str, str] :param location:", "encoded (2-channel stereo audio at a sampling rate of 48", "sub-classes are: TlsEndpoint, UnsecuredEndpoint. All required parameters must be populated", "= kwargs.get('ignore_signature', None) class TokenClaim(msrest.serialization.Model): \"\"\"Properties for expected token claims.", "for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param video_name: Required. Name", "type 'archive'. If used in a batch topology, this allows", "'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'preset': {'key':", "content token expiration date in ISO8601 format (eg. 2021-01-01T00:00:00Z). :vartype", "= kwargs.get('title', None) self.description = kwargs.get('description', None) self.segment_length = kwargs.get('segment_length',", "self.audio_encoder = kwargs.get('audio_encoder', None) self.video_encoder = kwargs.get('video_encoder', None) class NodeBase(msrest.serialization.Model):", "at any time and the new desired retention period will", "{'required': True}, 'inputs': {'required': True}, } _attribute_map = { 'type':", "'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipelineUpdate,", "{'key': 'timeSequences', 'type': 'TimeSequenceBase'}, } def __init__( self, **kwargs ):", "self).__init__(**kwargs) self.type = kwargs['type'] self.user_assigned_identities = kwargs.get('user_assigned_identities', None) class VideoAnalyzerOperationStatus(msrest.serialization.Model):", "Possible values include: \"Archive\", \"File\". 
:vartype type_properties_type: str or ~video_analyzer.models.VideoType", "): super(EncoderProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str self.preset", "not be published, disabling low latency streaming. This is used,", "that the expected use of the topology to be described", "{'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'type_properties_type': {'readonly':", "str :param bitrate_kbps: Bitrate, in kilobits per second or Kbps,", "ES384 or ES512. Possible values include: \"ES256\", \"ES384\", \"ES512\". :type", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Topology", "filled by server. :type type: str :param issuers: List of", "this node. :type inputs: list[~video_analyzer.models.NodeInput] \"\"\" _validation = { 'type':", "Required. JWT token key id. Validation keys are looked up", "to Azure. :param name: Required. The operation name. :type name:", "~video_analyzer.models.VideoType :ivar flags: Video flags contain information about the available", "'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'}, } def __init__( self, **kwargs", "_attribute_map = { 'value': {'key': 'value', 'type': '[AccessPolicyEntity]'}, 'next_link': {'key':", "this allows for video and audio to be stored as", "self, **kwargs ): super(PipelineJob, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description", "use when establishing the remote tunnel. This string is case-sensitive.", "associated with the specified storage account. :param value: Array of", "set to 'true' content will not be archived or recorded.", "with the video content authorization token to download the most", "Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model): \"\"\"The check availability request body. :param name:", "of width or height need be provided. 
Possible values include:", "to specific video resources. Variables are only populated by the", "for JWT token validation. You probably want to use the", "values include: \"SingleLayer_540p_H264_AAC\", \"SingleLayer_720p_H264_AAC\", \"SingleLayer_1080p_H264_AAC\", \"SingleLayer_2160p_H264_AAC\". :type name: str or", "**kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id',", "self.expiration = None self.error = None self.parameters = kwargs.get('parameters', None)", "or instructions on how the input video should be processed.", "change how the video sink publishes content via the video", "RTSP and RTP exchange: TCP or HTTP. When using TCP,", "Must be unique within the topology. :type name: str :param", ":vartype system_data: ~video_analyzer.models.SystemData :ivar edge_module_id: Internal ID generated for the", "kwargs.get('identity', None) self.status = None class AudioEncoderBase(msrest.serialization.Model): \"\"\"Base type for", "MDM namespace. :vartype source_mdm_namespace: str :ivar supported_time_grain_types: The supported time", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str self.issuers = kwargs.get('issuers',", "'str'}, } def __init__( self, **kwargs ): super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier", "self.audiences = kwargs.get('audiences', None) self.claims = kwargs.get('claims', None) self.keys =", "self, **kwargs ): super(AudioEncoderAac, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type:", "sub-classes are: VideoSequenceAbsoluteTimeMarkers. All required parameters must be populated in", "The resource of private end point. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param", "low latency video streaming. Default is 'false'. 
If set to", "{'key': 'tunnel', 'type': 'TunnelBase'}, 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options':", "Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name", "'properties.description', 'type': 'str'}, 'type_properties_type': {'key': 'properties.type', 'type': 'str'}, 'flags': {'key':", "It must be provided in the ISO8601 duration format in", "formatted certificates. All required parameters must be populated in order", "'@type', 'type': 'str'}, 'issuers': {'key': 'issuers', 'type': '[str]'}, 'audiences': {'key':", "of datetime ranges. Example: '[[\"2021-10-05T03:30:00Z\", \"2021-10-05T03:40:00Z\"]]'. :type ranges: str \"\"\"", "is set to \"live\". :type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions \"\"\" _validation =", "Endpoint Connections created under Video Analyzer account. :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]", "= { 'user_assigned_identity': {'required': True}, } _attribute_map = { 'user_assigned_identity':", "name: str :ivar type: The type of the resource. E.g.", "'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type':", "def __init__( self, **kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type = None", "format (eg. 2021-01-01T00:00:00Z). 
:vartype expiration_date: ~datetime.datetime :ivar token: The content", "'#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str self.issuers = kwargs.get('issuers', None) self.audiences =", "{ 'value': {'key': 'value', 'type': '[PipelineTopology]'}, 'next_link': {'key': '@nextLink', 'type':", "**kwargs ): super(ErrorDetail, self).__init__(**kwargs) self.code = None self.message = None", "{ 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, 'ignore_signature': {'key': 'ignoreSignature', 'type':", "\"P1D\" equals 1 day) and can vary between 1 day", "'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, 'frame_rate': {'key': 'frameRate', 'type':", "access for ingestion group. :type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption: Public", "True}, 'system_data': {'readonly': True}, 'edge_module_id': {'readonly': True}, } _attribute_map =", "} def __init__( self, **kwargs ): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available =", "connections. :type value: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _attribute_map = { 'value': {'key':", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'username':", "type: str self.inputs = kwargs['inputs'] class Sku(msrest.serialization.Model): \"\"\"The SKU details.", "through live pipelines can be streamed through Azure Video Analyzer", "service consumer and provider. :type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The", "'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, } _subtype_map =", ":ivar message: The error message. :vartype message: str :ivar target:", "detail. :type error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name': {'required':", "be generated for the same IoT edge module in case", "registration token. The Azure Video Analyzer IoT edge module must", "\"\"\"Base class for topology source nodes. 
You probably want to", ":vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of the instance level", "'str'}, 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, } def __init__( self,", "resources under the Video Analyzer account. Possible values include: \"Enabled\",", "'tunnel', 'type': 'TunnelBase'}, 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options': {'key':", "'TimeSequenceBase'}, } def __init__( self, **kwargs ): super(VideoSource, self).__init__(**kwargs) self.type", "keys in Key Vault. Variables are only populated by the", "second or Kbps, at which video should be encoded. If", "= { 'value': {'key': 'value', 'type': '[LivePipeline]'}, 'next_link': {'key': '@nextLink',", "super(ProxyResource, self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource): \"\"\"Access policies help define the authentication", "def __init__( self, **kwargs ): super(RsaTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey'", "resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id:", "'type': {'key': '@type', 'type': 'str'}, 'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'},", "can be played on any standard media player. It is", "description: str :param actions_required: A message indicating if changes on", "kwargs['ranges'] class VideoSink(SinkNodeBase): \"\"\"Video sink in a live topology allows", "node. 
All required parameters must be populated in order to", "None) self.provisioning_state = None class PrivateEndpointConnectionListResult(msrest.serialization.Model): \"\"\"List of private endpoint", "{'readonly': True}, 'type_properties_type': {'readonly': True}, 'flags': {'readonly': True}, 'content_urls': {'readonly':", "'isInUse', 'type': 'bool'}, } def __init__( self, **kwargs ): super(VideoFlags,", "to be used when validating client API access. :type authentication:", "description for the pipeline. :type description: str :ivar state: Current", "self, **kwargs ): super(VideoContentToken, self).__init__(**kwargs) self.expiration_date = None self.token =", ":type inputs: list[~video_analyzer.models.NodeInput] \"\"\" _validation = { 'type': {'required': True},", "recorded. This is used, for example, when the topology is", "self.url = kwargs['url'] self.tunnel = kwargs.get('tunnel', None) class ErrorAdditionalInfo(msrest.serialization.Model): \"\"\"The", "endpoint connection resource. Possible values include: \"Succeeded\", \"Creating\", \"Deleting\", \"Failed\".", "status: str :ivar error: The error details for the pipeline", "'type': 'str'}, 'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'}, 'rtsp_tunnel_url': {'key': 'rtspTunnelUrl',", "= kwargs.get('sku', None) self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters',", "list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A link to the next page of", "{'key': '@type', 'type': 'str'}, 'username': {'key': 'username', 'type': 'str'}, 'password':", "A collection of LivePipeline items. :type value: list[~video_analyzer.models.LivePipeline] :param next_link:", "datetime ranges. Example: '[[\"2021-10-05T03:30:00Z\", \"2021-10-05T03:40:00Z\"]]'. :type ranges: str \"\"\" _validation", "for topologies where \"kind\" is set to \"live\". 
:type segment_length:", "'device_id': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "require any updates on the consumer. :type actions_required: str \"\"\"", ":ivar expiration_date: The content token expiration date in ISO8601 format", "self.state = None self.expiration = None self.error = None self.parameters", "items. :param value: A collection of PipelineTopology items. :type value:", "'[MetricDimension]'}, 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'}, 'source_mdm_account': {'key': 'sourceMdmAccount', 'type':", "self.value = kwargs.get('value', None) class OperationDisplay(msrest.serialization.Model): \"\"\"Operation details. :param provider:", ":param storage_accounts: The storage accounts for this resource. :type storage_accounts:", "items. :param value: A collection of VideoEntity items. :type value:", "list[~video_analyzer.models.TokenKey] \"\"\" _validation = { 'type': {'required': True}, } _attribute_map", "= kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) self.status = kwargs.get('status',", "the video scaling mode to be applied. Default mode is", ":param type: Required. The type of key used to encrypt", "last_modified_at: ~datetime.datetime \"\"\" _attribute_map = { 'created_by': {'key': 'createdBy', 'type':", "topology. :type name: str :param inputs: Required. An array of", ":param identity: The identities associated to the Video Analyzer resource.", "class PrivateEndpointConnectionListResult(msrest.serialization.Model): \"\"\"List of private endpoint connection associated with the", "class ProxyResource(Resource): \"\"\"The resource model definition for a Azure Resource", "the key used to encrypt the account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties", "the token. :type name: str :param value: Required. Expected value", "{'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id':", "as a string. 
The datetime values should follow IS08601, and", "new provisioning token can be generated for the same IoT", "the live pipeline. The allowed range is from 500 to", "'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(PipelineJob,", "'inputs': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "4K to 1280x720. All required parameters must be populated in", "expiration date. :type expiration_date: ~datetime.datetime \"\"\" _validation = { 'expiration_date':", "'str'}, 'actions_required': {'key': 'actionsRequired', 'type': 'str'}, } def __init__( self,", "None self.status = None self.error = None class PipelineJobUpdate(ProxyResource): \"\"\"Pipeline", "the RTSP playback URL will not be published, disabling low", "types.Constant filled by server. :type type: str :param bitrate_kbps: Bitrate,", "the user. Value can be up to 2048 characters long.", "LivePipeline items. :param value: A collection of LivePipeline items. :type", "custom preset for encoding video. :type video_encoder: ~video_analyzer.models.VideoEncoderBase \"\"\" _validation", "that are returned in the response for all Azure Resource", "self).__init__(**kwargs) self.id = None self.name = None self.type = None", "} def __init__( self, **kwargs ): super(VideoAnalyzer, self).__init__(**kwargs) self.identity =", "approval/rejection of the connection. :type description: str :param actions_required: A", "server. :type type: str :param bitrate_kbps: Bitrate, in kilobits per", "= None self.principal_id = None class UsernamePasswordCredentials(CredentialsBase): \"\"\"Username and password", "(c) Microsoft Corporation. All rights reserved. # Licensed under the", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} } def __init__(", "live pipeline operation. :vartype name: str :ivar status: The status", "group. 
:type consumption: ~video_analyzer.models.GroupLevelAccessControl \"\"\" _attribute_map = { 'integration': {'key':", "error for a failed pipeline job. :param code: The error", "'str'}, 'target': {'key': 'target', 'type': 'str'}, 'details': {'key': 'details', 'type':", "str \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[EdgeModuleEntity]'},", "'current_key_identifier': {'readonly': True}, } _attribute_map = { 'key_identifier': {'key': 'keyIdentifier',", "kwargs.get('bitrate_kbps', None) class AudioEncoderAac(AudioEncoderBase): \"\"\"A custom preset for encoding audio", "that last modified the resource. :type last_modified_by: str :param last_modified_by_type:", "'blob_duration': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name',", "__init__( self, **kwargs ): super(VideoContentUrls, self).__init__(**kwargs) self.download_url = kwargs.get('download_url', None)", "list[~video_analyzer.models.MetricSpecification] \"\"\" _validation = { 'log_specifications': {'readonly': True}, 'metric_specifications': {'readonly':", "None) class AudioEncoderAac(AudioEncoderBase): \"\"\"A custom preset for encoding audio with", "} def __init__( self, **kwargs ): super(MetricDimension, self).__init__(**kwargs) self.name =", "'id': {'key': 'id', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'str'},", "when sending a request. :ivar name: The metric dimension name.", "target: str :ivar details: The error details. 
:vartype details: list[~video_analyzer.models.ErrorDetail]", "self.created_at = kwargs.get('created_at', None) self.last_modified_by = kwargs.get('last_modified_by', None) self.last_modified_by_type =", "self, **kwargs ): super(TimeSequenceBase, self).__init__(**kwargs) self.type = None # type:", "'certificates': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "that the pipeline can connect to over TLS transport (data", "self.content_urls = None self.media_info = kwargs.get('media_info', None) self.archival = kwargs.get('archival',", ":vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties. :type", "'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state': {'key':", "The reference to an existing pipeline topology defined for real-time", "to change how video is published. These are only allowed", "diagnostic log category name. :vartype name: str :ivar display_name: The", "\"SingleLayer_720p_H264_AAC\", \"SingleLayer_1080p_H264_AAC\", \"SingleLayer_2160p_H264_AAC\". :type name: str or ~video_analyzer.models.EncoderSystemPresetType \"\"\" _validation", "kwargs.get('width', None) self.mode = kwargs.get('mode', None) class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): \"\"\"A sequence", "The key may either be versioned (for example https://vault/keys/mykey/version1) or", "None self.private_endpoint_connections = None class VideoArchival(msrest.serialization.Model): \"\"\"Video archival properties. :param", "kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None)", "archived or recorded. This is used, for example, when the", "pipeline. Currently supported only with batch pipelines. 
All required parameters", "import HttpResponseError import msrest.serialization class Resource(msrest.serialization.Model): \"\"\"Common fields that are", "latency video streaming. Default is 'false'. If set to 'true',", "\"Milliseconds\". :vartype unit: str or ~video_analyzer.models.MetricUnit :ivar aggregation_type: The metric", "to expose a WebSocket tunneled RTSP stream. It is available", "_attribute_map = { 'name_available': {'key': 'nameAvailable', 'type': 'bool'}, 'reason': {'key':", ":param disable_rtsp_publishing: When set to 'true' the RTSP playback URL", "~video_analyzer.models.ResourceIdentity :ivar status: The current status of the Iot Hub", "then \"disableRtspPublishing\" must be set to 'false'. :type disable_archive: str", "be provided. Possible values include: \"Pad\", \"PreserveAspectRatio\", \"Stretch\". :type mode:", "list of one or more data sinks which allow for", "} def __init__( self, **kwargs ): super(AudioEncoderBase, self).__init__(**kwargs) self.type =", "suitable for different applications and scenarios. Possible values include: \"Archive\",", "segment_length: str :param retention_period: Video retention period indicates how long", "and will be ignored when sending a request. All required", "RTSP endpoint information for Video Analyzer to connect to. This", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, }", "URLs to the video content. :vartype content_urls: ~video_analyzer.models.VideoContentUrls :param media_info:", "filled by server. :type type: str :param kid: Required. JWT", "= { 'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'}, } def __init__(", "access to specific video resources. Variables are only populated by", "be references across the topology nodes. * Sources: list of", "less than or equal to 300. If omitted, the encoder", "pipelines in your account. :type bitrate_kbps: int :ivar state: Current", "The metric lock aggregation type. 
Possible values include: \"Average\", \"Count\",", "self).__init__(**kwargs) self.expiration_date = kwargs['expiration_date'] class LivePipeline(ProxyResource): \"\"\"Live pipeline represents a", "will disconnect temporarily from the camera. It will retry to", "managed identity to use when accessing a resource. All required", "{'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'transport':", "= kwargs.get('video_encoder', None) class NodeBase(msrest.serialization.Model): \"\"\"Base class for nodes. You", "Video Analyzer account is (optionally) encrypted. Variables are only populated", "class PrivateEndpoint(msrest.serialization.Model): \"\"\"The Private Endpoint resource. Variables are only populated", "AudioEncoderBase(msrest.serialization.Model): \"\"\"Base type for all audio encoder presets, which define", "'type': 'str'}, } def __init__( self, **kwargs ): super(ResourceIdentity, self).__init__(**kwargs)", "class for topology processor nodes. You probably want to use", "The datetime values should follow IS08601, and the sum of", "None) class IotHub(msrest.serialization.Model): \"\"\"The IoT Hub details. Variables are only", "of expected token issuers. Token issuer is valid if it", "str \"\"\" _attribute_map = { 'code': {'key': 'code', 'type': 'str'},", "} def __init__( self, **kwargs ): super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value =", "account. :vartype expiration: ~datetime.datetime :ivar error: Details about the error,", "True}, 'password': {'required': True}, } _attribute_map = { 'type': {'key':", "description: The reason for approval/rejection of the connection. :type description:", "files (segments) which are persisted to storage. Smaller segments provide", "to re-establish connection (with exponential backoff), checking to see if", "'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'start_time': {'key': 'startTime',", "policy. Possible values include: \"Reader\". 
:type role: str or ~video_analyzer.models.AccessPolicyRole", "= kwargs.get('value', None) class PrivateLinkServiceConnectionState(msrest.serialization.Model): \"\"\"A collection of information about", "when sending a request. :ivar client_id: The client ID. :vartype", "Video archival properties. :type archival: ~video_analyzer.models.VideoArchival \"\"\" _validation = {", "this class directly. Known sub-classes are: RtspSource, VideoSource. All required", "must be a Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage).", "name. Possible values include: \"Live_S1\", \"Batch_S1\". :type name: str or", "body. :param name: The name of the resource for which", "private end point. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A collection", ":type value: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _attribute_map = { 'value': {'key': 'value',", "sending a request. :ivar code: The error code. :vartype code:", "values include: \"Http\", \"Tcp\". :type transport: str or ~video_analyzer.models.RtspTransport :param", ":param description: The operation description. :type description: str \"\"\" _attribute_map", "provide lower archive playback latency but generate larger volume of", "{ 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} } def __init__( self, **kwargs ):", "\"\"\" _attribute_map = { 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, 'ignore_signature':", "= { 'type': {'key': '@type', 'type': 'str'}, 'audio_encoder': {'key': 'audioEncoder',", "'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name',", "to send to Azure. :param name: Required. Operation identifier. :type", "properties based on the current video state. All required parameters", "URL. 
:type large: str \"\"\" _attribute_map = { 'small': {'key':", "kwargs.get('public_network_access', None) class IotHub(msrest.serialization.Model): \"\"\"The IoT Hub details. Variables are", "'str'}, } def __init__( self, **kwargs ): super(Sku, self).__init__(**kwargs) self.name", "type: Required. The identity type. :type type: str :param user_assigned_identities:", "= { 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def __init__(", "{'key': 'userAssignedIdentity', 'type': 'str'}, } def __init__( self, **kwargs ):", "of Operation items. :type value: list[~video_analyzer.models.Operation] \"\"\" _attribute_map = {", "~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for this resource. :type", "None) self.scale = kwargs.get('scale', None) class VideoEncoderH264(VideoEncoderBase): \"\"\"A custom preset", "super(VideoPublishingOptions, self).__init__(**kwargs) self.disable_archive = kwargs.get('disable_archive', None) self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None)", "} def __init__( self, **kwargs ): super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date =", "= { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name',", ":param url: Required. The endpoint URL for Video Analyzer to", "the available video actions and its dynamic properties based on", "for the Azure Video Analyzer IoT edge module. All required", "'VideoFlags'}, 'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'}, 'media_info': {'key': 'properties.mediaInfo', 'type':", "error message. 
:vartype message: str :ivar target: The error target.", "input parameters to generate registration token for the Azure Video", "for all Azure Resource Manager APIs to return error details", "'url': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "= kwargs['can_stream'] self.has_data = kwargs['has_data'] self.is_in_use = kwargs['is_in_use'] class VideoMediaInfo(msrest.serialization.Model):", "MP4 files. Variables are only populated by the server, and", "a topology is composed of the following: * Parameters: list", "exchanged through long lived HTTP connections, and the RTP packages", "has 'tags' and a 'location'. Variables are only populated by", "of private endpoint connection operation. All required parameters must be", "= None class ListProvisioningTokenInput(msrest.serialization.Model): \"\"\"The input parameters to generate registration", "{ 'type': {'required': True}, 'name': {'required': True}, } _attribute_map =", "str :param frame_rate: The frame rate (in frames per second)", "'[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} } def", "probably want to use the sub-classes and not this class", "{'key': 'serviceSpecification', 'type': 'ServiceSpecification'}, } def __init__( self, **kwargs ):", "{'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly':", "Endpoint. :vartype id: str \"\"\" _validation = { 'id': {'readonly':", "principal ID. :vartype principal_id: str \"\"\" _validation = { 'client_id':", "items. :param value: A collection of AccessPolicyEntity items. :type value:", "a new video resource needs to be created on the", "or exported to other destinations. 
Variables are only populated by", "_validation = { 'name': {'readonly': True}, 'status': {'readonly': True}, 'error':", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'ranges': {'key':", "self, **kwargs ): super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date = None self.token =", "{'key': 'disableArchive', 'type': 'str'}, 'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'}, }", "code. :vartype code: str :ivar message: The error message. :vartype", "link resource group id. :vartype group_id: str :ivar required_members: The", "any updates on the consumer. :type actions_required: str \"\"\" _attribute_map", "} def __init__( self, **kwargs ): super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id =", "'type': 'str'}, } def __init__( self, **kwargs ): super(AccountEncryption, self).__init__(**kwargs)", "'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'expiration': {'key': 'properties.expiration',", "id: Required. The IoT Hub resource identifier. :type id: str", "_attribute_map = { 'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'}, } def", "video title provided by the user. Value can be up", "self.reason = kwargs.get('reason', None) self.message = kwargs.get('message', None) class CredentialsBase(msrest.serialization.Model):", "kwargs.get('resource', None) self.operation = kwargs.get('operation', None) self.description = kwargs.get('description', None)", "associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The", "HLSv4: /manifest(format=m3u8-aapl).m3u8 - HLS CMAF: /manifest(format=m3u8-cmaf) - DASH CMAF: /manifest(format=mpd-time-cmaf)", "~video_analyzer.models.CredentialsBase :param url: Required. 
The endpoint URL for Video Analyzer", "{'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} } def __init__( self, **kwargs ): super(ProcessorNodeBase, self).__init__(**kwargs)", "When set to 'true' the RTSP playback URL will not", "True}, 'current_key_identifier': {'readonly': True}, } _attribute_map = { 'key_identifier': {'key':", "of the resource for which availability needs to be checked.", "system_data: ~video_analyzer.models.SystemData :ivar edge_module_id: Internal ID generated for the instance", "values include: \"Pad\", \"PreserveAspectRatio\", \"Stretch\". :type mode: str or ~video_analyzer.models.VideoScaleMode", "= kwargs['name'] self.type = kwargs['type'] self.description = kwargs.get('description', None) self.default", "player. It is available when the video type is 'file'", "'str'}, 'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'}, } def __init__( self,", "error details for the live pipeline operation. :vartype error: ~video_analyzer.models.ErrorDetail", "an endpoint that the pipeline can connect to over TLS", "match the quality of the input video. :type bitrate_kbps: str", "The error details for the pipeline job operation. :vartype error:", "that data is being received. For example, video recording may", "name: Required. The operation name. :type name: str :param display:", "{'key': 'tunnel', 'type': 'TunnelBase'}, } def __init__( self, **kwargs ):", "as long as the module is able to periodically connect", ":ivar id: The ARM identifier for Private Endpoint. :vartype id:", "~video_analyzer.models.Sku :param description: An optional description of the pipeline topology.", "exported to other destinations. Variables are only populated by the", "name: str :param video_name: Required. Name of the Video Analyzer", "\"Total\". 
:vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation", "None class SinkNodeBase(NodeBase): \"\"\"Base class for topology sink nodes. You", "} def __init__( self, **kwargs ): super(NodeInput, self).__init__(**kwargs) self.node_name =", "Analyzer relies on tables, queues, and blobs. The primary storage", ":type next_link: str \"\"\" _attribute_map = { 'value': {'key': 'value',", "'type': '[MetricDimension]'}, 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'}, 'source_mdm_account': {'key': 'sourceMdmAccount',", "parameters: List of the topology parameter declarations. Parameters declared here", "'system_data': {'readonly': True}, 'group_id': {'readonly': True}, 'required_members': {'readonly': True}, }", "than zero, and less than or equal to 300. If", "{'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id':", "_attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'key_vault_properties': {'key':", "for consumption. :type download_url: str :param archive_base_url: Video archive streaming", "returned as part of the resource on API requests. :type", "connections, and the RTP packages are interleaved in the HTTP", "str :param name: Required. Node name. Must be unique within", "Possible values include: \"String\", \"SecretString\", \"Int\", \"Double\", \"Bool\". :type type:", "Parameters declared here can be referenced throughout the topology nodes", "how the Video Analyzer account is (optionally) encrypted. Variables are", "} def __init__( self, **kwargs ): super(EncoderPresetBase, self).__init__(**kwargs) self.type =", "_validation = { 'service_specification': {'readonly': True}, } _attribute_map = {", "True}, 'principal_id': {'readonly': True}, } _attribute_map = { 'client_id': {'key':", "the collection contains too many results to return in one", "metric unit. 
Possible values include: \"Bytes\", \"Count\", \"Milliseconds\". :vartype unit:", "or ~video_analyzer.models.MetricUnit :ivar aggregation_type: The metric aggregation type. Possible values", "types.Constant filled by server. :type type: str :param issuers: List", "{ 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self,", "self, **kwargs ): super(ProxyResource, self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource): \"\"\"Access policies help", "content should be processed. You probably want to use the", "{'required': True}, 'ranges': {'required': True}, } _attribute_map = { 'type':", ":type value: list[~video_analyzer.models.PipelineTopology] :param next_link: A link to the next", "\"\"\" _attribute_map = { 'name_available': {'key': 'nameAvailable', 'type': 'bool'}, 'reason':", "): super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name = None self.status = None self.error", "class directly. Known sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All required", "video content authorization token on any compatible DASH or HLS", "network access for consumption group. :type consumption: ~video_analyzer.models.GroupLevelAccessControl \"\"\" _attribute_map", "'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state':", "archive segments which are intended to be kept in storage.", "using IoT Hub device information. All required parameters must be", "'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def", "looked up based on the key id present on the", "'type': {'key': '@type', 'type': 'str'}, 'username': {'key': 'username', 'type': 'str'},", "needs to be created on the service. :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties", "**kwargs ): super(AccessPolicyEntity, self).__init__(**kwargs) self.role = kwargs.get('role', None) self.authentication =", "\"ManagedIdentity\", \"Key\". 
:type created_by_type: str or ~video_analyzer.models.CreatedByType :param created_at: The", "~video_analyzer.models.AccessPolicyEccAlgo :param x: Required. X coordinate. :type x: str :param", "or ~video_analyzer.models.VideoScaleMode \"\"\" _attribute_map = { 'height': {'key': 'height', 'type':", "**kwargs ): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname', None) self.ignore_signature =", "'[VideoAnalyzer]'}, } def __init__( self, **kwargs ): super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value", "\"\"\" _validation = { 'type': {'required': True}, 'status': {'readonly': True},", "Possible values include: \"Enabled\", \"Disabled\". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess", "and provider. :param status: Indicates whether the connection has been", "'archive' and preview images are enabled. :param small: Low resolution", "} def __init__( self, **kwargs ): super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint =", "self, **kwargs ): super(TlsEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type:", "'type': 'str'}, } def __init__( self, **kwargs ): super(TokenClaim, self).__init__(**kwargs)", "super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates =", "of individual content files (segments) which are persisted to storage.", "one key. :type keys: list[~video_analyzer.models.TokenKey] \"\"\" _validation = { 'type':", "lead to errors when uploading content to the archive. Default", "published via a video resource of type 'file'. All required", "certificates. One certificate per entry. :type certificates: list[str] \"\"\" _validation", "expiration date of the registration token. The Azure Video Analyzer", "https://vault/keys/mykey/version1) or reference a key without a version (for example", "by server. 
:type type: str :param iot_hub_name: Required. Name of", ":ivar status: The current status of the Key Vault mapping.", "which must be present on the token. :type name: str", "True}, 'url': {'required': True}, } _attribute_map = { 'type': {'key':", "kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerUpdate(msrest.serialization.Model): \"\"\"The update", "to download the video MP4 file. The resulting MP4 file", ":type type: str :param bitrate_kbps: Bitrate, in kilobits per second", "are not defined in the pipelines. All required parameters must", "name: The diagnostic log category name. :vartype name: str :ivar", "user. Value can be up to 256 characters long. :type", "'systemData', 'type': 'SystemData'}, 'role': {'key': 'properties.role', 'type': 'str'}, 'authentication': {'key':", "by server. :type type: str :param issuers: List of expected", "remote tunnel. This string is case-sensitive. :type device_id: str \"\"\"", "Operation items. :type value: list[~video_analyzer.models.Operation] \"\"\" _attribute_map = { 'value':", "clear transport (no encryption in transit). All required parameters must", "= { 'expiration_date': {'readonly': True}, 'token': {'readonly': True}, } _attribute_map", "The client ID. :vartype client_id: str :ivar principal_id: The principal", "the same processing is to be applied across all the", "self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name =", "effective within 24 hours. :type retention_period: str \"\"\" _attribute_map =", "{'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'kind': {'required':", "'VideoPublishingOptions'}, } def __init__( self, **kwargs ): super(VideoSink, self).__init__(**kwargs) self.type", "audio to be stored as a file, and published via", "sending a request. :ivar client_id: The client ID. 
:vartype client_id:", "actions_required: A message indicating if changes on the service provider", "Connections created under Video Analyzer account. :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\"", "= { 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} } def __init__(", ":vartype status: str \"\"\" _validation = { 'id': {'required': True},", "): super(VideoSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str self.video_name", "self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkResource(Resource): \"\"\"A private link", "in kilobits per second or Kbps, at which video should", "'description': {'key': 'properties.description', 'type': 'str'}, 'type_properties_type': {'key': 'properties.type', 'type': 'str'},", "{'readonly': True}, 'source_mdm_account': {'readonly': True}, 'source_mdm_namespace': {'readonly': True}, 'supported_time_grain_types': {'readonly':", "most recent still image from the video archive in different", "than 30 days will be periodically deleted. This value can", "): super(EndpointBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.credentials", "not take effect if the video already exists. :param title:", "where \"kind\" is set to \"live\". :type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions \"\"\"", "The encoder preset, which defines the recipe or instructions on", "{'readonly': True}, } _attribute_map = { 'code': {'key': 'code', 'type':", "Optional video title provided by the user. Value can be", "'type': 'str'}, 'transport': {'key': 'transport', 'type': 'str'}, 'endpoint': {'key': 'endpoint',", "timestamp of resource last modification (UTC). 
:type last_modified_at: ~datetime.datetime \"\"\"", "__init__( self, **kwargs ): super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access = kwargs.get('public_network_access', None)", "'type': 'str'}, } def __init__( self, **kwargs ): super(EncoderSystemPreset, self).__init__(**kwargs)", "__init__( self, **kwargs ): super(UserAssignedManagedIdentity, self).__init__(**kwargs) self.client_id = None self.principal_id", ":type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the Video", "{ 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type':", "Required. The ID of the storage account resource. Video Analyzer", "properties of the key used to encrypt the account. :type", "\"Application\", \"ManagedIdentity\", \"Key\". :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType :param last_modified_at:", ":type name: str :param value: Required. Expected value of the", ":type kind: str or ~video_analyzer.models.Kind :param sku: Describes the properties", "class VideoAnalyzerIdentity(msrest.serialization.Model): \"\"\"The managed identity for the Video Analyzer resource.", "self.value = kwargs.get('value', None) class VideoAnalyzerIdentity(msrest.serialization.Model): \"\"\"The managed identity for", "preset for encoding audio with the AAC codec. All required", "Required. Topology kind. Possible values include: \"Live\", \"Batch\". :type kind:", "describes an endpoint that the pipeline can connect to over", "str \"\"\" _validation = { 'name': {'required': True}, 'value': {'required':", "Operation identifier. :type name: str :param id: Operation resource ID.", "Endpoint resource. 
Variables are only populated by the server, and", "'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName',", ":param video_publishing_options: Options to change how the video sink publishes", "resource group id. :vartype group_id: str :ivar required_members: The private", "= kwargs.get('ingestion', None) self.consumption = kwargs.get('consumption', None) class NodeInput(msrest.serialization.Model): \"\"\"Describes", "code: The error code. :type code: str :param message: The", "resolution of the encoded video. If omitted, the encoder uses", "self.is_data_action = kwargs.get('is_data_action', None) self.action_type = kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model):", "None self.parameters = kwargs.get('parameters', None) class LivePipelineCollection(msrest.serialization.Model): \"\"\"A collection of", "of the operation. :type origin: str :param properties: Operation properties", "super(PrivateLinkResource, self).__init__(**kwargs) self.group_id = None self.required_members = None self.required_zone_names =", "The service specifications. 
:vartype service_specification: ~video_analyzer.models.ServiceSpecification \"\"\" _validation = {", "for content from a Video Analyzer video resource to be", "self.validation_options = kwargs.get('validation_options', None) class TlsValidationOptions(msrest.serialization.Model): \"\"\"Options for controlling the", "The topology should be defined according to the scenario to", "'dimensions', 'type': '[MetricDimension]'}, 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'}, 'source_mdm_account': {'key':", "**kwargs ): super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date = kwargs['expiration_date'] class LivePipeline(ProxyResource): \"\"\"Live", "super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkServiceConnectionState(msrest.serialization.Model): \"\"\"A collection", "the service. :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param video_publishing_options: Options to change", "'expiration_date': {'required': True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate',", "description: str \"\"\" _attribute_map = { 'provider': {'key': 'provider', 'type':", "next page of the collection (when the collection contains too", "are: UsernamePasswordCredentials. All required parameters must be populated in order", "= kwargs.get('audiences', None) self.claims = kwargs.get('claims', None) self.keys = kwargs.get('keys',", "True}, 'display_name': {'readonly': True}, 'to_be_exported_for_shoebox': {'readonly': True}, } _attribute_map =", "class ServiceSpecification(msrest.serialization.Model): \"\"\"The service metric specifications. Variables are only populated", "of authentication keys which will be auto-rotated as long as", "class directly. Known sub-classes are: RtspSource, VideoSource. 
All required parameters", "Resource Manager tracked top level resource which has 'tags' and", "str :param width: The desired output video width. :type width:", "identity's resource identifier to use when accessing a resource. :type", "of the Iot Hub mapping. :vartype status: str \"\"\" _validation", "from cameras. * Processors: list of nodes which perform data", "'str'}, 'display_description': {'key': 'displayDescription', 'type': 'str'}, 'unit': {'key': 'unit', 'type':", "'id': {'key': 'id', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'},", "or ~video_analyzer.models.RtspTransport :param endpoint: Required. RTSP endpoint information for Video", "can connect to over clear transport (no encryption in transit).", "unit: str or ~video_analyzer.models.MetricUnit :ivar aggregation_type: The metric aggregation type.", "'[SourceNodeBase]'}, 'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks', 'type':", "azure.core.exceptions import HttpResponseError import msrest.serialization class Resource(msrest.serialization.Model): \"\"\"Common fields that", "def __init__( self, **kwargs ): super(VideoSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink'", "'systemData', 'type': 'SystemData'}, 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key':", "'[Endpoint]'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs', 'type':", "sub-classes and not this class directly. 
Known sub-classes are: UsernamePasswordCredentials.", "UnsecuredEndpoint(EndpointBase): \"\"\"Unsecured endpoint describes an endpoint that the pipeline can", "'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},", "= kwargs.get('is_data_action', None) self.action_type = kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model): \"\"\"A", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} } def __init__( self,", "the video already exists. :param title: Optional title provided by", ":type tags: dict[str, str] :param location: Required. The geo-location where", "{'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state':", "its dynamic properties based on the current video state. :vartype", "this class directly. Known sub-classes are: SecureIotDeviceRemoteTunnel. All required parameters", "= kwargs.get('description', None) self.parameters = kwargs.get('parameters', None) self.sources = kwargs.get('sources',", "to Azure. :param name: Required. Operation identifier. :type name: str", "{'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, }", "super(EncoderProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str self.preset =", "def __init__( self, **kwargs ): super(VideoEntity, self).__init__(**kwargs) self.title = kwargs.get('title',", "only one of width or height need be provided. Possible", "'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type':", "into a pipeline. Currently supported only with batch pipelines. 
All", "per second or Kbps, at which video should be encoded.", "'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } _subtype_map", "kwargs.get('is_data_action', None) self.action_type = kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model): \"\"\"A collection", "CheckNameAvailabilityRequest(msrest.serialization.Model): \"\"\"The check availability request body. :param name: The name", "'type': {'required': True}, 'name': {'required': True}, 'endpoint': {'required': True}, }", "node. :type inputs: list[~video_analyzer.models.NodeInput] :param preset: Required. The encoder preset,", "a default value must be defined. Topology parameters with a", "True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'group_id': {'readonly': True},", "'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'} } def __init__( self, **kwargs ): super(SourceNodeBase,", "(eg. 2021-01-01T00:00:00Z). :vartype expiration_date: ~datetime.datetime :ivar token: The content token", "processing is to be applied across all the cameras. Individual", "): super(TunnelBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class", "information for Video Analyzer to connect to. This contains the", "'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections': {'key':", "} def __init__( self, **kwargs ): super(TokenKey, self).__init__(**kwargs) self.type =", "for low latency video streaming. Default is 'false'. If set", "'type': 'CertificateSource'}, 'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'}, } def __init__(", "certificate chain trust validation to be skipped. Default is 'false'.", "check availability result. 
:param name_available: Indicates if the resource name", ":type frame_rate: str :param scale: Describes the resolution of the", "retention period indicates the maximum age of the video archive", "handshake between IoT edge module and the cloud. After the", "str \"\"\" _validation = { 'name': {'required': True}, 'type': {'required':", "type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\".", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str self.video_name = kwargs['video_name']", "qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype", "tier: str or ~video_analyzer.models.SkuTier \"\"\" _validation = { 'name': {'required':", "'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'video_name': {'key': 'videoName', 'type':", "signature must match exactly one key. :type keys: list[~video_analyzer.models.TokenKey] \"\"\"", "nodes. You probably want to use the sub-classes and not", "causes the certificate chain trust validation to be skipped. Default", "'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, 'description': {'key': 'description',", "if disableArchive is set to true, then no content is", "The status of the live pipeline operation. :vartype status: str", "pipelines or can be created by exporting sequences from existing", "the RTSP camera exceeds this capacity, then the service will", "'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'kind': {'key':", "the parameter declared in the pipeline topology. 
:type name: str", "network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the Video Analyzer", "Analyzer IoT edge module through the Azure IoT Edge module", ":type expiration_date: ~datetime.datetime \"\"\" _validation = { 'expiration_date': {'required': True},", "system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information.", "'[SinkNodeBase]'}, } def __init__( self, **kwargs ): super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind", "_validation = { 'type': {'required': True}, 'name': {'required': True}, 'endpoint':", "The geo-location where the resource lives. :type location: str :param", "self.integration = kwargs.get('integration', None) self.ingestion = kwargs.get('ingestion', None) self.consumption =", "playback URL will not be published, disabling low latency streaming.", "service. :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param video_publishing_options: Options to change how", "available when the video type is 'archive' and a live,", "the same IoT edge module in case the module state", "~video_analyzer.models.LivePipelineState :param parameters: List of the instance level parameter values", "'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def", "kwargs['name'] class ProcessorNodeBase(NodeBase): \"\"\"Base class for topology processor nodes. You", "'type': 'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def __init__(", "the service. These will not take effect if the video", "the encoded video. The value must be greater than zero,", "the consumer. :type actions_required: str \"\"\" _attribute_map = { 'status':", "sub-classes and not this class directly. 
Known sub-classes are: TlsEndpoint,", "self.type = None # type: Optional[str] self.credentials = kwargs['credentials'] self.url", "certificate subject name validation to be skipped. Default is 'false'.", "OperationCollection(msrest.serialization.Model): \"\"\"A collection of Operation items. :param value: A collection", "registration token for the Azure Video Analyzer IoT edge module.", "{'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'}, } def __init__( self, **kwargs ):", "a TLS connection. A null list designates that Azure Video", ":type integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public network access for ingestion", ":type name: str :param value: Parameter value to be applied", "None) class ErrorAdditionalInfo(msrest.serialization.Model): \"\"\"The resource management error additional info. Variables", "} def __init__( self, **kwargs ): super(TunnelBase, self).__init__(**kwargs) self.type =", "send to Azure. :param type: Required. The type of key", "tokens. Having multiple keys allow for seamless key rotation of", "kept in storage. Value must be specified in ISO8601 duration", "period indicates how long the video is kept in storage.", "an Azure Resource Manager tracked top level resource which has", "'type': {'readonly': True}, 'system_data': {'readonly': True}, 'type_properties_type': {'readonly': True}, 'flags':", "{ 'code': {'readonly': True}, 'message': {'readonly': True}, 'target': {'readonly': True},", "whether the connection has been Approved/Rejected/Removed by the owner of", "'type': 'VideoCreationProperties'}, 'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'}, } def __init__(", "Azure Video Analyzer Player Widget or compatible players. Exported videos", "batch pipelines. 
All required parameters must be populated in order", "{'key': 'type', 'type': 'str'}, 'info': {'key': 'info', 'type': 'object'}, }", ":vartype edge_module_id: str \"\"\" _validation = { 'id': {'readonly': True},", "'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'aggregation_type': {'key': 'aggregationType', 'type':", "'description': {'key': 'properties.description', 'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'},", "self.title = kwargs.get('title', None) self.description = kwargs.get('description', None) self.segment_length =", "'archiveBaseUrl', 'type': 'str'}, 'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'}, 'preview_image_urls': {'key':", "'description', 'type': 'str'}, 'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'retention_period': {'key':", "JWT token key id. Validation keys are looked up based", "Vault mapping. :vartype status: str \"\"\" _validation = { 'type':", "super(ParameterDeclaration, self).__init__(**kwargs) self.name = kwargs['name'] self.type = kwargs['type'] self.description =", "{'key': 'width', 'type': 'str'}, 'mode': {'key': 'mode', 'type': 'str'}, }", "'type': '[PipelineJob]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__(", "'type': {'key': '@type', 'type': 'str'}, 'ranges': {'key': 'ranges', 'type': 'str'},", "None) class CredentialsBase(msrest.serialization.Model): \"\"\"Base class for credential objects. You probably", "instances of the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List", "class directly. Known sub-classes are: JwtAuthentication. 
All required parameters must", "'endpoint', 'type': 'EndpointBase'}, } def __init__( self, **kwargs ): super(RtspSource,", "'str'}, } def __init__( self, **kwargs ): super(Endpoint, self).__init__(**kwargs) self.endpoint_url", "self).__init__(**kwargs) self.public_network_access = kwargs.get('public_network_access', None) class IotHub(msrest.serialization.Model): \"\"\"The IoT Hub", "kwargs['user_assigned_identity'] class RsaTokenKey(TokenKey): \"\"\"Required validation properties for tokens generated with", "~video_analyzer.models.VideoMediaInfo :param archival: Video archival properties. :type archival: ~video_analyzer.models.VideoArchival \"\"\"", "kwargs.get('identity', None) self.status = None class SystemData(msrest.serialization.Model): \"\"\"Metadata pertaining to", "self, **kwargs ): super(AccessPolicyEntity, self).__init__(**kwargs) self.role = kwargs.get('role', None) self.authentication", "when they are not defined in the pipelines. All required", "destinations. Variables are only populated by the server, and will", "~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name': {'readonly': True}, 'status': {'readonly':", "def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = kwargs.get('provider',", "trust validation to be skipped. Default is 'false'. :type ignore_signature:", "self.retention_period = kwargs.get('retention_period', None) class VideoContentToken(msrest.serialization.Model): \"\"\"\"Video content token grants", "'value', 'type': '[PipelineTopology]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def", "user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] \"\"\" _validation = { 'type': {'required': True},", "specify a value. :type default: str \"\"\" _validation = {", "base URL: .. code-block:: - HLSv4: /manifest(format=m3u8-aapl).m3u8 - HLS CMAF:", "not this class directly. Known sub-classes are: EncoderProcessor. 
All required", "video_creation_properties: Optional video properties to be used in case a", "__init__( self, **kwargs ): super(EccTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' #", "nodes enable external data to be ingested by the pipeline.", "Optional[str] self.credentials = kwargs['credentials'] self.url = kwargs['url'] self.tunnel = kwargs.get('tunnel',", "): super(VideoMediaInfo, self).__init__(**kwargs) self.segment_length = kwargs.get('segment_length', None) class VideoPreviewImageUrls(msrest.serialization.Model): \"\"\"Video", "**kwargs ): super(MetricSpecification, self).__init__(**kwargs) self.name = None self.display_name = None", "'str'}, } def __init__( self, **kwargs ): super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity", "used to change how video is published. These are only", "for a unique RTSP camera. Variables are only populated by", "value: list[~video_analyzer.models.PipelineTopology] :param next_link: A link to the next page", "\"\"\"A list of PEM formatted certificates. All required parameters must", "\"\"\"Unsecured endpoint describes an endpoint that the pipeline can connect", "the Video Analyzer video resource to be used as the", ":ivar blob_duration: The time range for requests in each blob.", "when sending a request. :ivar type: The additional info type.", "private link resources. :param value: Array of private link resources.", "Whether or not public network access is allowed for resources", "module. :vartype edge_module_id: str \"\"\" _validation = { 'id': {'readonly':", "being referenced, doesn't necessarily indicate that data is being received.", "can later be defined in individual instances of the pipeline.", "kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class PipelineTopologyCollection(msrest.serialization.Model): \"\"\"A collection", ":param scale: Describes the resolution of the encoded video. 
If", "seconds to 5 minutes, in 30 seconds increments. Changing this", "token header. :type kid: str \"\"\" _validation = { 'type':", ":type kind: str or ~video_analyzer.models.Kind :param sku: Required. Describes the", "\"\"\"\"Video content token grants access to the video content URLs.\".", "RsaTokenKey(TokenKey): \"\"\"Required validation properties for tokens generated with RSA algorithm.", "the camera. It will retry to re-establish connection (with exponential", "Possible values include: \"Reader\". :type role: str or ~video_analyzer.models.AccessPolicyRole :param", "None self.parameters = kwargs.get('parameters', None) class LogSpecification(msrest.serialization.Model): \"\"\"A diagnostic log", "value to be applied on this specific pipeline. :type value:", "{'required': True}, 'endpoint': {'required': True}, } _attribute_map = { 'type':", "VideoEntity items. :type value: list[~video_analyzer.models.VideoEntity] :param next_link: A link to", "self.parameters = kwargs.get('parameters', None) class PipelineJobCollection(msrest.serialization.Model): \"\"\"A collection of PipelineJob", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData \"\"\" _validation =", "'#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str self.preset = kwargs['preset'] class EncoderSystemPreset(EncoderPresetBase): \"\"\"Describes", "def __init__( self, **kwargs ): super(PipelineJobError, self).__init__(**kwargs) self.code = kwargs.get('code',", "'type': 'str'}, 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'}, } def __init__(", "refer to different values, such as individual cameras' RTSP endpoints", "{'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSink':", "(UTC). 
:type last_modified_at: ~datetime.datetime \"\"\" _attribute_map = { 'created_by': {'key':", "different cameras, as long as the same processing is to", "class EncoderPresetBase(msrest.serialization.Model): \"\"\"Base type for all encoder presets, which define", "'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(VideoAnalyzer,", "_validation = { 'name': {'required': True}, } _attribute_map = {", "self.iot_hubs = kwargs.get('iot_hubs', None) self.public_network_access = kwargs.get('public_network_access', None) self.network_access_control =", "{'key': 'videoName', 'type': 'str'}, 'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'}, }", "not have tags and a location. Variables are only populated", "file is available for consumption. :type download_url: str :param archive_base_url:", "Required. Name of the parameter declared in the pipeline topology.", "'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'},", "via a video resource of type 'file'. All required parameters", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, }", "'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type':", "defines the recipe or instructions on how the input content", "Approved/Rejected/Removed by the owner of the service. Possible values include:", "'ranges': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "): super(MetricSpecification, self).__init__(**kwargs) self.name = None self.display_name = None self.display_description", "self.aggregation_type = None self.lock_aggregation_type = None self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)", "code is regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError import", "source nodes. 
Source nodes enable external data to be ingested", "kwargs.get('last_modified_at', None) class TimeSequenceBase(msrest.serialization.Model): \"\"\"A sequence of datetime ranges as", "the collection (when the collection contains too many results to", "None) self.last_modified_by_type = kwargs.get('last_modified_by_type', None) self.last_modified_at = kwargs.get('last_modified_at', None) class", "= kwargs.get('value', None) class PemCertificateList(CertificateSource): \"\"\"A list of PEM formatted", "{'key': 'endpoint', 'type': 'EndpointBase'}, } def __init__( self, **kwargs ):", "Video Analyzer Player Widget or compatible players. Exported videos can", "**kwargs ): super(VideoContentUrls, self).__init__(**kwargs) self.download_url = kwargs.get('download_url', None) self.archive_base_url =", "ranges as a string. The datetime values should follow IS08601,", "{ 'type': {'required': True}, 'kid': {'required': True}, } _attribute_map =", "'flags': {'readonly': True}, 'content_urls': {'readonly': True}, } _attribute_map = {", "or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The metric lock aggregation type. Possible", "length of individual video files (segments) which are persisted to", "'logSpecifications', 'type': '[LogSpecification]'}, 'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, } def", "self.kind = kwargs.get('kind', None) self.sku = kwargs.get('sku', None) self.description =", "* Sources: list of one or more data sources nodes", "True}, 'credentials': {'required': True}, 'url': {'required': True}, } _attribute_map =", "\"Total\". :vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The metric", "an RTSP camera or generic RTSP server to be ingested", "'type': 'str'}, 'n': {'key': 'n', 'type': 'str'}, 'e': {'key': 'e',", "True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'},", "for topology source nodes. 
You probably want to use the", "def __init__( self, **kwargs ): super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value',", "'properties.sinks', 'type': '[SinkNodeBase]'}, } def __init__( self, **kwargs ): super(PipelineTopology,", "= kwargs['kind'] self.sku = kwargs['sku'] self.description = kwargs.get('description', None) self.parameters", "Defines the access level granted by this policy. Possible values", "in conjunction with the video content authorization token to download", "details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The error additional info. :vartype additional_info:", "the pipeline does not specify a value. :type default: str", "and not this class directly. Known sub-classes are: AudioEncoderAac. All", "self.end_time = kwargs.get('end_time', None) self.status = kwargs.get('status', None) self.error =", "to send to Azure. :param type: Required. The identity type.", "The user assigned managed identity's resource identifier to use when", "self.type = None # type: Optional[str] class TokenKey(msrest.serialization.Model): \"\"\"Key properties", "in order to send to Azure. :param node_name: Required. The", "for Video Analyzer to connect to. This contains the required", "a failed pipeline job. :param code: The error code. :type", "the video type is 'archive' and a live, low-latency feed", "\"String\", \"SecretString\", \"Int\", \"Double\", \"Bool\". 
:type type: str or ~video_analyzer.models.ParameterType", "last_modified_by: str :param last_modified_by_type: The type of identity that last", "'TunnelBase'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'}", "time and the new desired retention period will be effective", "Video analyzer IoT edge module to be initialized and authorized", "} def __init__( self, **kwargs ): super(LogSpecification, self).__init__(**kwargs) self.name =", "data sinks which allow for data to be stored or", "kwargs.get('description', None) self.state = None self.expiration = None self.error =", "'str'}, 'flags': {'key': 'properties.flags', 'type': 'VideoFlags'}, 'content_urls': {'key': 'properties.contentUrls', 'type':", "None) self.type = kwargs.get('type', None) class CheckNameAvailabilityResponse(msrest.serialization.Model): \"\"\"The check availability", "None self.to_be_exported_for_shoebox = None class MetricSpecification(msrest.serialization.Model): \"\"\"A metric emitted by", "): super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates", "specifications. :vartype service_specification: ~video_analyzer.models.ServiceSpecification \"\"\" _validation = { 'service_specification': {'readonly':", "use when accessing a resource. :type user_assigned_identity: str \"\"\" _validation", "True}, 'name': {'required': True}, 'inputs': {'required': True}, 'video_name': {'required': True},", "type: str :param credentials: Required. 
Credentials to be presented to", "{'key': 'y', 'type': 'str'}, } def __init__( self, **kwargs ):", "= kwargs.get('encryption', None) self.iot_hubs = kwargs.get('iot_hubs', None) self.public_network_access = kwargs.get('public_network_access',", "# type: str self.alg = kwargs['alg'] self.x = kwargs['x'] self.y", "'is_in_use': {'key': 'isInUse', 'type': 'bool'}, } def __init__( self, **kwargs", "{'required': True}, } _attribute_map = { 'endpoint_url': {'key': 'endpointUrl', 'type':", "'value': {'key': 'value', 'type': '[VideoAnalyzer]'}, } def __init__( self, **kwargs", "__init__( self, **kwargs ): super(RtspSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource' #", "\"Inactive\", \"Activating\", \"Active\", \"Deactivating\". :vartype state: str or ~video_analyzer.models.LivePipelineState :param", "how long the video is kept in storage. Value must", "specified resources under the Video Analyzer account. Possible values include:", "desired output video height. :type height: str :param width: The", "'str'}, 'created_by_type': {'key': 'createdByType', 'type': 'str'}, 'created_at': {'key': 'createdAt', 'type':", "metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar", "(read-only). Possible values include: \"Processing\", \"Canceled\", \"Completed\", \"Failed\". :vartype state:", "Operation items. :param value: A collection of Operation items. :type", "cloud. A new provisioning token can be generated for the", "values include: \"Enabled\", \"Disabled\". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param", ":type bitrate_kbps: int :ivar state: Current state of the pipeline", "playback latency but generate larger volume of storage transactions. 
Larger", "edge_module_id: str \"\"\" _validation = { 'id': {'readonly': True}, 'name':", "'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(Endpoint,", "'str'}, } def __init__( self, **kwargs ): super(LivePipelineCollection, self).__init__(**kwargs) self.value", "self.name = kwargs['name'] self.type = kwargs['type'] self.description = kwargs.get('description', None)", "class VideoEncoderBase(msrest.serialization.Model): \"\"\"Base type for all video encoding presets, which", "omitted, encoder sets it automatically to try and match the", ":ivar status: The current status of the Iot Hub mapping.", "\"Activating\", \"Active\", \"Deactivating\". :vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters:", "kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class VideoFlags(msrest.serialization.Model): \"\"\"Video flags", "class EdgeModuleProvisioningToken(msrest.serialization.Model): \"\"\"Provisioning token properties. A provisioning token allows for", "SecureIotDeviceRemoteTunnel. All required parameters must be populated in order to", "~video_analyzer.models.CreatedByType :param last_modified_at: The timestamp of resource last modification (UTC).", "origin: Origin of the operation. :type origin: str :param properties:", "# type: str self.trusted_certificates = kwargs.get('trusted_certificates', None) self.validation_options = kwargs.get('validation_options',", "be ignored when sending a request. :ivar name: The metric", "analyzer account. :param integration: Public network access for integration group.", "optionally have default values to be used when they are", "cloud. After the initial handshake, the IoT edge module will", "'hasData', 'type': 'bool'}, 'is_in_use': {'key': 'isInUse', 'type': 'bool'}, } def", "'type': '[PipelineTopology]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__(", ":param name: Required. Name of the parameter declared in the", "and RTP exchange: TCP or HTTP. 
When using TCP, the", "The maximum bitrate, in kilobits per second or Kbps, at", "self, **kwargs ): super(NetworkAccessControl, self).__init__(**kwargs) self.integration = kwargs.get('integration', None) self.ingestion", "{'key': 'e', 'type': 'str'}, } def __init__( self, **kwargs ):", "EncoderProcessor(ProcessorNodeBase): \"\"\"Encoder processor allows for encoding of the input content.", "type: str :param iot_hub_name: Required. Name of the IoT Hub.", ":param public_network_access: Whether or not public network access is allowed", "active pipeline. The fact that is being referenced, doesn't necessarily", "'type': '[VideoAnalyzer]'}, } def __init__( self, **kwargs ): super(VideoAnalyzerCollection, self).__init__(**kwargs)", "of trusted certificate authorities when authenticating a TLS connection. A", "a version (for example https://vault/keys/mykey). :type key_identifier: str :ivar current_key_identifier:", "Newly created videos have this value set to false. :type", "Required. Name of the Video Analyzer video resource to be", "= None self.to_be_exported_for_shoebox = None class MetricSpecification(msrest.serialization.Model): \"\"\"A metric emitted", "'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data':", "{'key': 'type', 'type': 'str'}, 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, }", "archiving is enabled. :type archive_base_url: str :param rtsp_tunnel_url: Video low-latency", "'display_description': {'readonly': True}, 'unit': {'readonly': True}, 'aggregation_type': {'readonly': True}, 'lock_aggregation_type':", "self.video_creation_properties = kwargs.get('video_creation_properties', None) self.video_publishing_options = kwargs.get('video_publishing_options', None) class VideoSource(SourceNodeBase):", "\"Stretch\". :type mode: str or ~video_analyzer.models.VideoScaleMode \"\"\" _attribute_map = {", "\"kind\" is set to \"live\". 
:type retention_period: str \"\"\" _attribute_map", "using the encoder processor. All required parameters must be populated", "): super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access = kwargs.get('public_network_access', None) class IotHub(msrest.serialization.Model): \"\"\"The", "self.key_identifier = kwargs['key_identifier'] self.current_key_identifier = None class ListProvisioningTokenInput(msrest.serialization.Model): \"\"\"The input", "'str'}, } def __init__( self, **kwargs ): super(PrivateEndpoint, self).__init__(**kwargs) self.id", "def __init__( self, **kwargs ): super(Sku, self).__init__(**kwargs) self.name = kwargs['name']", "str :ivar name: The name of the resource. :vartype name:", "provisioning token can be generated for the same IoT edge", "of one or more data sources nodes such as an", "= kwargs.get('title', None) self.description = kwargs.get('description', None) self.type_properties_type = None", "of the chosen video segment length. It is available when", "= { 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} } def __init__( self, **kwargs", "'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key':", "_attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key':", "self.disable_archive = kwargs.get('disable_archive', None) self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None) class VideoScale(msrest.serialization.Model):", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'issuers': {'key':", "a request. :ivar type: The additional info type. 
:vartype type:", "{'readonly': True}, 'content_urls': {'readonly': True}, } _attribute_map = { 'id':", "'properties.description', 'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'expiration': {'key':", ":type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for this", "{'required': True}, 'kid': {'required': True}, 'alg': {'required': True}, 'n': {'required':", "query string parameter. The token is specific to a single", "The name of the upstream node in the pipeline which", ":ivar status: The status of the pipeline job operation. :vartype", "level network access control. :param public_network_access: Whether or not public", "\"\"\" _attribute_map = { 'created_by': {'key': 'createdBy', 'type': 'str'}, 'created_by_type':", "parameters to generate registration token for the Azure Video Analyzer", "{'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} } def __init__( self, **kwargs ): super(TimeSequenceBase, self).__init__(**kwargs)", "set to \"live\". :param disable_archive: When set to 'true' content", "# type: str self.username = kwargs['username'] self.password = kwargs['password'] class", "which perform data analysis or transformations. * Sinks: list of", "blob_duration: The time range for requests in each blob. :vartype", "~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed reason why the given name is", ":param can_stream: Required. Value indicating whether or not the video", "to try and match the quality of the input video.", "the private endpoint connection resource. Possible values include: \"Succeeded\", \"Creating\",", "are: VideoEncoderH264. All required parameters must be populated in order", "{'required': True}, 'inputs': {'required': True}, 'preset': {'required': True}, } _attribute_map", "'status': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", "edge module and the cloud. 
After the initial handshake, the", "LivePipelineCollection(msrest.serialization.Model): \"\"\"A collection of LivePipeline items. :param value: A collection", "a live topology allows for video and audio to be", "class TokenKey(msrest.serialization.Model): \"\"\"Key properties for JWT token validation. You probably", "self.video_publishing_options = kwargs.get('video_publishing_options', None) class VideoSource(SourceNodeBase): \"\"\"Video source allows for", "_validation = { 'type': {'required': True}, 'kid': {'required': True}, 'alg':", "preset for encoding video. :type video_encoder: ~video_analyzer.models.VideoEncoderBase \"\"\" _validation =", "use when authenticating a TLS connection. By default, strict validation", "Sources: list of one or more data sources nodes such", "up based on the key id present on the JWT", "'lastModifiedBy', 'type': 'str'}, 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, 'last_modified_at': {'key':", "to export metric to shoebox. :vartype to_be_exported_for_shoebox: bool \"\"\" _validation", "tier. Possible values include: \"Standard\". :vartype tier: str or ~video_analyzer.models.SkuTier", "'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, } def __init__(", "'key_identifier': {'required': True}, 'current_key_identifier': {'readonly': True}, } _attribute_map = {", "collection contains too many results to return in one response).", "super(AccountEncryption, self).__init__(**kwargs) self.type = kwargs['type'] self.key_vault_properties = kwargs.get('key_vault_properties', None) self.identity", "applies to data-plane. :type is_data_action: bool :param action_type: Indicates the", "It is available when the video type is 'file' and", "None) class VideoFlags(msrest.serialization.Model): \"\"\"Video flags contain information about the available", "modification of the resource. 
:param created_by: The identity that created", "**kwargs ): super(PrivateLinkResource, self).__init__(**kwargs) self.group_id = None self.required_members = None", "**kwargs ): super(LogSpecification, self).__init__(**kwargs) self.name = None self.display_name = None", "list[~video_analyzer.models.TokenClaim] :param keys: List of keys which can be used", "height need be provided. Possible values include: \"Pad\", \"PreserveAspectRatio\", \"Stretch\".", "will be ignored when sending a request. :ivar code: The", "Currently supported only with batch pipelines. All required parameters must", "self.status = kwargs.get('status', None) self.description = kwargs.get('description', None) self.actions_required =", "class SourceNodeBase(NodeBase): \"\"\"Base class for topology source nodes. You probably", "} def __init__( self, **kwargs ): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type =", "'str'}, 'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'}, 'video_publishing_options': {'key': 'videoPublishingOptions', 'type':", "exactly one key. :type keys: list[~video_analyzer.models.TokenKey] \"\"\" _validation = {", "time. :type end_time: str :param status: Operation status. :type status:", "Origin of the operation. :type origin: str :param properties: Operation", "on API requests. :type password: str \"\"\" _validation = {", "= kwargs.get('private_endpoint', None) self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None) self.provisioning_state = None", "StorageAccount(msrest.serialization.Model): \"\"\"The details about the associated storage account. Variables are", "self.display_description = None self.unit = None self.aggregation_type = None self.lock_aggregation_type", "from existing captured video through a pipeline job. Videos ingested", "archive_base_url: str :param rtsp_tunnel_url: Video low-latency streaming URL. 
The live", ":type type: str :param credentials: Required. Credentials to be presented", "defined for real-time content processing. When activated, this live pipeline", "kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase): \"\"\"Encoder processor allows for encoding of the", "class EccTokenKey(TokenKey): \"\"\"Required validation properties for tokens generated with Elliptical", "Known sub-classes are: AudioEncoderAac. All required parameters must be populated", "self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str class AuthenticationBase(msrest.serialization.Model): \"\"\"Base class", "self.sources = kwargs.get('sources', None) self.processors = kwargs.get('processors', None) self.sinks =", "Required. The IoT device id to use when establishing the", "which allows for content to be ingested from cameras. *", "properties. :type archival: ~video_analyzer.models.VideoArchival \"\"\" _validation = { 'id': {'readonly':", "description: An optional description of the pipeline topology. It is", "JWT token header. :type kid: str :param alg: Required. Elliptical", "'token': {'readonly': True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate',", "Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData", "None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state = None self.parameters =", "display name. :vartype display_name: str :ivar display_description: The metric display", "can be streamed through Azure Video Analyzer Player Widget or", "{'key': 'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control':", "medium: str :param large: High resolution preview image URL. 
:type", "__init__( self, **kwargs ): super(VideoSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink' #", "transport utilized by the RTSP and RTP exchange: TCP or", "**kwargs ): super(ProxyResource, self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource): \"\"\"Access policies help define", "items. :type value: list[~video_analyzer.models.Operation] \"\"\" _attribute_map = { 'value': {'key':", "\"\"\" _validation = { 'type': {'required': True}, } _attribute_map =", "name: str \"\"\" _validation = { 'type': {'required': True}, 'name':", "used to encrypt the account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity:", "def __init__( self, **kwargs ): super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value = kwargs.get('value',", "{'key': 'name', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error':", "super(UnsecuredEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str class UserAssignedManagedIdentity(msrest.serialization.Model):", "_validation = { 'type': {'required': True}, 'kid': {'required': True}, }", "as a file, and published via a video resource of", "id: Fully qualified resource ID for the resource. Ex -", "self.blob_duration = None class MetricDimension(msrest.serialization.Model): \"\"\"A metric dimension. Variables are", "(with exponential backoff), checking to see if the camera bitrate", "'type': 'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections',", "IoT edge module must be initialized and connected to the", ":type user_assigned_identity: str \"\"\" _validation = { 'user_assigned_identity': {'required': True},", "Analyzer account. 
Variables are only populated by the server, and", ":type key_identifier: str :ivar current_key_identifier: The current key used to", "'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key': 'dimensions', 'type':", "to the archive. Default value is 30 seconds. This property", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} } def", "'file'. All required parameters must be populated in order to", "where \"kind\" is set to \"live\". :type segment_length: str :param", "super(VideoCreationProperties, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description = kwargs.get('description', None)", "portions of archived content. Variables are only populated by the", "'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, 'frame_rate': {'key': 'frameRate', 'type': 'str'},", "self, **kwargs ): super(VideoEntity, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description", "'type': '[AccessPolicyEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__(", "can be reused across many different cameras, as long as", "'default', 'type': 'str'}, } def __init__( self, **kwargs ): super(ParameterDeclaration,", "= { 'type': {'required': True}, 'name': {'required': True}, 'video_name': {'required':", "{'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} } def __init__( self, **kwargs ):", ":param id: Required. The IoT Hub resource identifier. :type id:", "class VideoEntity(ProxyResource): \"\"\"Represents a video resource within Azure Video Analyzer.", "ES256, ES384 or ES512. 
Possible values include: \"ES256\", \"ES384\", \"ES512\".", "'properties.requiredMembers', 'type': '[str]'}, 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'}, } def", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map", "Iot Hub mapping. :vartype status: str \"\"\" _validation = {", "str :param scale: Describes the resolution of the encoded video.", "of a new or existing video resource used to capture", "= kwargs.get('sinks', None) class PipelineTopologyCollection(msrest.serialization.Model): \"\"\"A collection of PipelineTopology items.", "'kid': {'key': 'kid', 'type': 'str'}, } _subtype_map = { 'type':", "connection. A null list designates that Azure Video Analyzer's list", "self.error = kwargs.get('error', None) class GroupLevelAccessControl(msrest.serialization.Model): \"\"\"Group level network access", ":type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for this", "send to Azure. :param name: Required. Name of the parameter", "{'key': 'properties.edgeModuleId', 'type': 'str'}, } def __init__( self, **kwargs ):", "ingested through live pipelines can be streamed through Azure Video", "_attribute_map = { 'tags': {'key': 'tags', 'type': '{str}'}, 'identity': {'key':", "network access for integration group. :type integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion:", "A null list designates that Azure Video Analyzer's list of", "properties to be used in case a new video resource", "self.tier = None class StorageAccount(msrest.serialization.Model): \"\"\"The details about the associated", "ignored when sending a request. All required parameters must be", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'}", "deleted. 
This value can be updated at any time and", "self).__init__(**kwargs) self.code = None self.message = None self.target = None", "\"\"\" _validation = { 'key_identifier': {'required': True}, 'current_key_identifier': {'readonly': True},", "be used in conjunction with the video content authorization token", "storage accounts for this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints:", "id: str :param identity: Required. The IoT Hub identity. :type", "content can be automatically played by the Azure Video Analyzer", "self).__init__(**kwargs) self.name_available = kwargs.get('name_available', None) self.reason = kwargs.get('reason', None) self.message", "(UTC). :type created_at: ~datetime.datetime :param last_modified_by: The identity that last", "None) class VideoAnalyzerIdentity(msrest.serialization.Model): \"\"\"The managed identity for the Video Analyzer", "None) self.keys = kwargs.get('keys', None) class KeyVaultProperties(msrest.serialization.Model): \"\"\"The details for", "is used as input of the current node. :type node_name:", "kwargs.get('disable_archive', None) self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None) class VideoScale(msrest.serialization.Model): \"\"\"The video", "endpoint. :type endpoint_url: str :param type: Required. The type of", "str :param default: The default value for the parameter to", "pipeline can connect to over TLS transport (data is encrypted", "self, **kwargs ): super(AccountEncryption, self).__init__(**kwargs) self.type = kwargs['type'] self.key_vault_properties =", "'value', 'type': 'str'}, } def __init__( self, **kwargs ): super(TokenClaim,", "'inputs', 'type': '[NodeInput]'}, 'preset': {'key': 'preset', 'type': 'EncoderPresetBase'}, } def", "location: str \"\"\" _validation = { 'id': {'readonly': True}, 'name':", "request. :ivar service_specification: The service specifications. 
:vartype service_specification: ~video_analyzer.models.ServiceSpecification \"\"\"", "None) self.medium = kwargs.get('medium', None) self.large = kwargs.get('large', None) class", "contains the required information for Video Analyzer to connect to", "list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled.", "range specified in the sequence. All required parameters must be", "self, **kwargs ): super(TunnelBase, self).__init__(**kwargs) self.type = None # type:", "or not the video can be streamed. Only \"archive\" type", "to prevent this value to be returned as part of", "frame rate of the input video. :type frame_rate: str :param", "currently being referenced be an active pipeline. The fact that", "} def __init__( self, **kwargs ): super(Operation, self).__init__(**kwargs) self.name =", "system_data: ~video_analyzer.models.SystemData :param kind: Topology kind. Possible values include: \"Live\",", ":type video_encoder: ~video_analyzer.models.VideoEncoderBase \"\"\" _validation = { 'type': {'required': True},", "~video_analyzer.models.AccessPolicyRsaAlgo :param n: Required. RSA public key modulus. :type n:", "\"\"\"TLS endpoint describes an endpoint that the pipeline can connect", "kwargs.get('value', None) class PrivateLinkResource(Resource): \"\"\"A private link resource. Variables are", "this class directly. Known sub-classes are: TlsEndpoint, UnsecuredEndpoint. All required", "_attribute_map = { 'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'}, } def", "{'required': True}, 'name': {'required': True}, 'video_name': {'required': True}, 'time_sequences': {'required':", "When using HTTP, the RTSP messages are exchanged through long", "class PrivateEndpointConnection(Resource): \"\"\"The Private Endpoint Connection resource. Variables are only", ":param username: Required. Username to be presented as part of", "\"\"\"A collection of VideoEntity items. 
:param value: A collection of", "self, **kwargs ): super(Endpoint, self).__init__(**kwargs) self.endpoint_url = kwargs.get('endpoint_url', None) self.type", "performed. :type resource: str :param operation: The operation type. :type", "{'required': True}, } _attribute_map = { 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type':", "**kwargs ): super(SinkNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str", "access is allowed for specified resources under the Video Analyzer", "{ 'type': {'required': True}, 'kid': {'required': True}, 'alg': {'required': True},", "'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self,", "of LivePipeline items. :type value: list[~video_analyzer.models.LivePipeline] :param next_link: A link", "and if disableArchive is set to true, then no content", "def __init__( self, **kwargs ): super(StorageAccount, self).__init__(**kwargs) self.id = kwargs['id']", "'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, 'alg': {'key': 'alg',", "provisioning_state: The provisioning state of the private endpoint connection resource.", "'str'}, } def __init__( self, **kwargs ): super(PipelineTopologyCollection, self).__init__(**kwargs) self.value", "into the video. Newly created videos have this value set", "live topology allows for video and audio to be captured,", "{'key': 'sourceMdmNamespace', 'type': 'str'}, 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'}, }", "self.private_endpoint_connections = None class VideoArchival(msrest.serialization.Model): \"\"\"Video archival properties. :param retention_period:", "The discriminator for derived types.Constant filled by server. :type type:", "time. :type start_time: str :param end_time: Operation end time. 
:type", "{'required': True}, 'video_name': {'required': True}, 'time_sequences': {'required': True}, } _attribute_map", "by exporting sequences from existing captured video through a pipeline", "See pipeline topology parameters for more information. All required parameters", "'int'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type':", ":param keys: List of keys which can be used to", "): super(Endpoint, self).__init__(**kwargs) self.endpoint_url = kwargs.get('endpoint_url', None) self.type = kwargs['type']", "{'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'title':", "operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name': {'readonly':", "must be set to 'false'. :type disable_archive: str :param disable_rtsp_publishing:", "# type: str class AuthenticationBase(msrest.serialization.Model): \"\"\"Base class for access policies", "'disableArchive', 'type': 'str'}, 'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'}, } def", "to over clear transport (no encryption in transit). All required", "image URL. :type medium: str :param large: High resolution preview", ":vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an existing pipeline", "def __init__( self, **kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name = kwargs['name']", "\"\"\"Base class for topology sink nodes. You probably want to", "be unique within the topology. :type name: str :param video_name:", "keys: list[~video_analyzer.models.TokenKey] \"\"\" _validation = { 'type': {'required': True}, }", "when sending a request. :ivar expiration_date: The expiration date of", "Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage", "based on the current video state. 
:vartype flags: ~video_analyzer.models.VideoFlags :ivar", "alg: Required. Elliptical curve algorithm to be used: ES256, ES384", "download_url: str :param archive_base_url: Video archive streaming base URL. The", "The current status of the storage account mapping. :vartype status:", "): super(TrackedResource, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.location = kwargs['location']", "is archived. :type video_name: str :param video_creation_properties: Optional video properties", "): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username", "the remote tunnel. This string is case-sensitive. :type device_id: str", "RTP packages are interleaved in the HTTP connections alongside the", "'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url',", "= { 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'}, } def __init__(", "self, **kwargs ): super(VideoFlags, self).__init__(**kwargs) self.can_stream = kwargs['can_stream'] self.has_data =", "value. :type default: str \"\"\" _validation = { 'name': {'required':", "error details for the pipeline job operation. :vartype error: ~video_analyzer.models.ErrorDetail", "'metric_specifications': {'readonly': True}, } _attribute_map = { 'log_specifications': {'key': 'logSpecifications',", "{'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ):", "} def __init__( self, **kwargs ): super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier =", "self).__init__(**kwargs) self.provider = kwargs.get('provider', None) self.resource = kwargs.get('resource', None) self.operation", "video width. 
:type width: str :param mode: Describes the video", ":type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] \"\"\" _validation = { 'type': {'required':", "error: The error detail. :type error: ~video_analyzer.models.ErrorDetail \"\"\" _validation =", "'type': 'str'}, 'default': {'key': 'default', 'type': 'str'}, } def __init__(", "{ 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type':", "inputs: list[~video_analyzer.models.NodeInput] :param preset: Required. The encoder preset, which defines", "} def __init__( self, **kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type =", "= kwargs.get('last_modified_by_type', None) self.last_modified_at = kwargs.get('last_modified_at', None) class TimeSequenceBase(msrest.serialization.Model): \"\"\"A", "within the topology. :type name: str \"\"\" _validation = {", "such as an RTSP source which allows for content to", "'type': 'str'}, 'actions_required': {'key': 'actionsRequired', 'type': 'str'}, } def __init__(", "file can be played on any standard media player. It", "the topology source nodes. Source nodes enable external data to", "'x': {'required': True}, 'y': {'required': True}, } _attribute_map = {", "str :param issuers: List of expected token issuers. Token issuer", ":type type: str :param bitrate_kbps: The maximum bitrate, in kilobits", "value set to false. :type has_data: bool :param is_in_use: Required.", "The storage accounts for this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar", "ES512. Possible values include: \"ES256\", \"ES384\", \"ES512\". :type alg: str", "proxy resource. It will not have tags and a location.", "AuthenticationBase(msrest.serialization.Model): \"\"\"Base class for access policies authentication methods. You probably", "instructions on how the input content should be processed. You", "the resolution from 4K to 1280x720. 
All required parameters must", "def __init__( self, **kwargs ): super(VideoContentToken, self).__init__(**kwargs) self.expiration_date = None", "__init__( self, **kwargs ): super(TrackedResource, self).__init__(**kwargs) self.tags = kwargs.get('tags', None)", "will be ignored when sending a request. :ivar type: The", "None) self.archive_base_url = kwargs.get('archive_base_url', None) self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None) self.preview_image_urls", "kwargs.get('code', None) self.message = kwargs.get('message', None) class PipelineJobOperationStatus(msrest.serialization.Model): \"\"\"Used for", "'inputs', 'type': '[NodeInput]'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'video_creation_properties': {'key':", "\"Disabled\". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access", "the input content. For example, it can used to change", "The supported time grain types. :vartype supported_time_grain_types: list[str] \"\"\" _validation", "to creation and last modification of the resource. :param created_by:", ":param ranges: Required. The sequence of datetime ranges. 
Example: '[[\"2021-10-05T03:30:00Z\",", "name: The name of the resource for which availability needs", "self, **kwargs ): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available = kwargs.get('name_available', None) self.reason", "= kwargs.get('created_by_type', None) self.created_at = kwargs.get('created_at', None) self.last_modified_by = kwargs.get('last_modified_by',", "**kwargs ): super(TrackedResource, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.location =", "{ 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def __init__( self,", "{'readonly': True}, 'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly': True}, 'enable_regional_mdm_account': {'readonly':", "} def __init__( self, **kwargs ): super(TlsEndpoint, self).__init__(**kwargs) self.type =", "= None class ResourceIdentity(msrest.serialization.Model): \"\"\"The user assigned managed identity to", ":vartype expiration: ~datetime.datetime :ivar error: Details about the error, in", "\"\"\" _validation = { 'name': {'required': True}, 'value': {'required': True},", "license information. # Code generated by Microsoft (R) AutoRest Code", "self).__init__(**kwargs) self.type = None # type: Optional[str] class TlsEndpoint(EndpointBase): \"\"\"TLS", "'displayDescription', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'aggregation_type': {'key':", ":ivar supported_time_grain_types: The supported time grain types. :vartype supported_time_grain_types: list[str]", "by the user. Value can be up to 256 characters", "the token expiration date. :vartype expiration_date: ~datetime.datetime :ivar token: The", "use of user-defined parameters, which allow for a topology to", "The desired output video width. 
:type width: str :param mode:", "indicates the length of individual video files (segments) which are", "'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str self.iot_hub_name = kwargs['iot_hub_name'] self.device_id = kwargs['device_id']", "super(AuthenticationBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class CertificateSource(msrest.serialization.Model):", "resolutions. They are available when the video type is 'archive'", "streamed through Azure Video Analyzer Player Widget or compatible players.", "{'key': 'segmentLength', 'type': 'str'}, } def __init__( self, **kwargs ):", "to send to Azure. :param type: Required. The type of", "value is parameterized as a secret string in order to", "{'key': 'properties', 'type': 'Properties'}, 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, 'action_type':", "private_endpoint_connections: Private Endpoint Connections created under Video Analyzer account. :vartype", "the JWT token header. :type kid: str :param alg: Required.", ":type claims: list[~video_analyzer.models.TokenClaim] :param keys: List of keys which can", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData \"\"\" _validation = { 'id':", "CMAF: /manifest(format=mpd-time-cmaf) Moreover, an ongoing video recording can be played", "optional property, typically used when the endpoint is behind a", "def __init__( self, **kwargs ): super(LivePipeline, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name',", "error: The error details for the pipeline job operation. :vartype", "self).__init__(**kwargs) self.endpoint_url = kwargs.get('endpoint_url', None) self.type = kwargs['type'] class EndpointBase(msrest.serialization.Model):", "id: str :ivar name: The name of the resource. 
:vartype", "self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource): \"\"\"Access policies help define the authentication rules,", "{'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset':", "self).__init__(**kwargs) self.type = None # type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps',", "'type': {'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs", "Azure. :param name: Required. Name of the parameter declared in", "A provisioning token allows for a single instance of Azure", "be defined according to the scenario to be achieved and", "video files (segments) which are persisted to storage. Smaller segments", "control. :param public_network_access: Whether or not public network access is", "'str'}, 'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'}, } def __init__( self,", "self).__init__(**kwargs) self.name = kwargs.get('name', None) self.type = kwargs.get('type', None) class", ":param large: High resolution preview image URL. :type large: str", "a request. :ivar id: Fully qualified resource ID for the", "edge module will agree on a set of authentication keys", "for offline processing of selected portions of archived content. Variables", "name: str :ivar display_name: The metric display name. :vartype display_name:", ":vartype code: str :ivar message: The error message. :vartype message:", "super(VideoContentUrls, self).__init__(**kwargs) self.download_url = kwargs.get('download_url', None) self.archive_base_url = kwargs.get('archive_base_url', None)", "the maximum age of the video archive segments which are", "URL. The live content can be automatically played by the", "authenticating a TLS connection. By default, strict validation is used.", "class CertificateSource(msrest.serialization.Model): \"\"\"Base class for certificate sources. 
You probably want", "'[NodeInput]'}, 'preset': {'key': 'preset', 'type': 'EncoderPresetBase'}, } def __init__( self,", "True}, 'location': {'required': True}, 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True},", "object. :type error: ~video_analyzer.models.ErrorDetail \"\"\" _attribute_map = { 'error': {'key':", "str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for Video", "self, **kwargs ): super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link", "connection resource. Possible values include: \"Succeeded\", \"Creating\", \"Deleting\", \"Failed\". :vartype", "self).__init__(**kwargs) self.type = None # type: Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase): \"\"\"A", "versioned (for example https://vault/keys/mykey/version1) or reference a key without a", "issuers. Token issuer is valid if it matches at least", "topology_name: str :param description: An optional description for the pipeline.", "authorities should be used. :type trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options: Validation", "one of width or height need be provided. Possible values", "Azure. :param name: Required. Name of the parameter. :type name:", "str self.username = kwargs['username'] self.password = kwargs['password'] class VideoAnalyzer(TrackedResource): \"\"\"The", "The error object. :type error: ~video_analyzer.models.ErrorDetail \"\"\" _attribute_map = {", "the storage account mapping. :vartype status: str \"\"\" _validation =", "= { 'value': {'key': 'value', 'type': '[PrivateLinkResource]'}, } def __init__(", "{ 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type':", "= None class EdgeModuleEntityCollection(msrest.serialization.Model): \"\"\"A collection of EdgeModuleEntity items. 
:param", "__init__( self, **kwargs ): super(AuthenticationBase, self).__init__(**kwargs) self.type = None #", "increments. When absent (null), all video content is retained indefinitely.", "_attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key':", "width: The desired output video width. :type width: str :param", "MDM account. :vartype source_mdm_account: str :ivar source_mdm_namespace: The source MDM", "ingestion: Public network access for ingestion group. :type ingestion: ~video_analyzer.models.GroupLevelAccessControl", "is 'false'. If set to 'true', then \"disableRtspPublishing\" must be", "None class ErrorResponse(msrest.serialization.Model): \"\"\"Common error response for all Azure Resource", "= None # type: Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase): \"\"\"A remote tunnel", "True}, 'video_name': {'required': True}, 'time_sequences': {'required': True}, } _attribute_map =", "as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param video_name:", "self, **kwargs ): super(StorageAccount, self).__init__(**kwargs) self.id = kwargs['id'] self.identity =", ":type certificates: list[str] \"\"\" _validation = { 'type': {'required': True},", "{'key': 'inputs', 'type': '[NodeInput]'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'video_creation_properties':", "average frame rate of the input video. :type frame_rate: str", "if the code is regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions import", "self, **kwargs ): super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type = kwargs['type'] self.user_assigned_identities =", "nodes enable pipeline data to be analyzed, processed or transformed.", "claim which must be present on the token. :type name:", "self.archival = kwargs.get('archival', None) class VideoEntityCollection(msrest.serialization.Model): \"\"\"A collection of VideoEntity", "output video height. 
:type height: str :param width: The desired", "~datetime.datetime \"\"\" _validation = { 'expiration_date': {'required': True}, } _attribute_map", "following: * Parameters: list of user defined parameters that can", ":vartype id: str \"\"\" _validation = { 'id': {'readonly': True},", "\"Completed\", \"Failed\". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The", "Required. RSA public key exponent. :type e: str \"\"\" _validation", "operation name. :type name: str :param display: The operation display", "class EncoderCustomPreset(EncoderPresetBase): \"\"\"Describes a custom preset for encoding the input", "expiration: ~datetime.datetime :ivar error: Details about the error, in case", "Required. RSA algorithm to be used: RS256, RS384 or RS512.", "'username', 'type': 'str'}, 'password': {'key': 'password', 'type': 'str'}, } def", "self.state = None self.parameters = kwargs.get('parameters', None) class LogSpecification(msrest.serialization.Model): \"\"\"A", "= kwargs['preset'] class EncoderSystemPreset(EncoderPresetBase): \"\"\"Describes a built-in preset for encoding", "Required. The IoT Hub resource identifier. :type id: str :param", "{'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints':", "specified storage account. :param value: Array of private endpoint connections.", "supported_aggregation_types: list[str] :ivar dimensions: The metric dimensions. :vartype dimensions: list[~video_analyzer.models.MetricDimension]", "= kwargs.get('video_creation_properties', None) self.video_publishing_options = kwargs.get('video_publishing_options', None) class VideoSource(SourceNodeBase): \"\"\"Video", "and not this class directly. Known sub-classes are: JwtAuthentication. All", "values include: \"Processing\", \"Canceled\", \"Completed\", \"Failed\". 
:vartype state: str or", "'str'}, 'description': {'key': 'description', 'type': 'str'}, 'segment_length': {'key': 'segmentLength', 'type':", "on a set of authentication keys which will be auto-rotated", "details for the pipeline job operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\"", "LivePipeline items. :type value: list[~video_analyzer.models.LivePipeline] :param next_link: A link to", "of private link resources. :type value: list[~video_analyzer.models.PrivateLinkResource] \"\"\" _attribute_map =", "defined through the use of user-defined parameters, which allow for", "{'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'to_be_exported_for_shoebox':", "{ 'name': {'required': True}, 'value': {'required': True}, } _attribute_map =", "the error, in case the pipeline job fails. :vartype error:", "version. :vartype current_key_identifier: str \"\"\" _validation = { 'key_identifier': {'required':", "into a pipeline. All required parameters must be populated in", "error additional info. 
:vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] \"\"\" _validation = {", "message: str \"\"\" _attribute_map = { 'code': {'key': 'code', 'type':", "= None self.target = None self.details = None self.additional_info =", "or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState \"\"\" _validation = { 'id': {'readonly': True}, 'name':", "\"\"\" _attribute_map = { 'disable_archive': {'key': 'disableArchive', 'type': 'str'}, 'disable_rtsp_publishing':", "type: str \"\"\" _validation = { 'type': {'required': True}, }", "{'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name':", "self.last_modified_by_type = kwargs.get('last_modified_by_type', None) self.last_modified_at = kwargs.get('last_modified_at', None) class TimeSequenceBase(msrest.serialization.Model):", "= { 'type': {'required': True}, 'certificates': {'required': True}, } _attribute_map", "containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name:", "{ 'disable_archive': {'key': 'disableArchive', 'type': 'str'}, 'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type':", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str class AuthenticationBase(msrest.serialization.Model): \"\"\"Base", "None) class KeyVaultProperties(msrest.serialization.Model): \"\"\"The details for accessing the encryption keys", "): super(PipelineJobError, self).__init__(**kwargs) self.code = kwargs.get('code', None) self.message = kwargs.get('message',", "expiration_date: ~datetime.datetime \"\"\" _validation = { 'expiration_date': {'required': True}, }", ":type role: str or ~video_analyzer.models.AccessPolicyRole :param authentication: Authentication method to", "'archive'. 
If used in a batch topology, this allows for", "{'readonly': True}, 'system_data': {'readonly': True}, 'group_id': {'readonly': True}, 'required_members': {'readonly':", "= None class ProxyResource(Resource): \"\"\"The resource model definition for a", "ingested from RTSP cameras through live pipelines or can be", "to encrypt the account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity: The", "filled by server. :type type: str :param certificates: Required. PEM", "'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__(", "type: str self.transport = kwargs.get('transport', None) self.endpoint = kwargs['endpoint'] class", "access control for video analyzer account. :param integration: Public network", ":type last_modified_by_type: str or ~video_analyzer.models.CreatedByType :param last_modified_at: The timestamp of", "current video state. All required parameters must be populated in", ":type end_time: str :param status: Operation status. :type status: str", "token header. :type kid: str :param alg: Required. RSA algorithm", "'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},", "Required. The encoder preset, which defines the recipe or instructions", "'ErrorDetail'}, } def __init__( self, **kwargs ): super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name", "used. 
:type validation_options: ~video_analyzer.models.TlsValidationOptions \"\"\" _validation = { 'type': {'required':", "type: str self.certificates = kwargs['certificates'] class PipelineJob(ProxyResource): \"\"\"Pipeline job represents", "{'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'group_id': {'readonly':", "set to 'true' causes the certificate chain trust validation to", "'str'}, } def __init__( self, **kwargs ): super(VideoEntityCollection, self).__init__(**kwargs) self.value", "self.status = None class AudioEncoderBase(msrest.serialization.Model): \"\"\"Base type for all audio", "\"\"\" _attribute_map = { 'segment_length': {'key': 'segmentLength', 'type': 'str'}, }", ":type medium: str :param large: High resolution preview image URL.", "derived types.Constant filled by server. :type type: str :param issuers:", "and not this class directly. Known sub-classes are: VideoEncoderH264. All", "{'key': 'toBeExportedForShoebox', 'type': 'bool'}, } def __init__( self, **kwargs ):", "None # type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.frame_rate =", "kwargs['sku'] self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters', None) self.sources", "self).__init__(**kwargs) self.id = None class PrivateEndpointConnection(Resource): \"\"\"The Private Endpoint Connection", "and not this class directly. Known sub-classes are: EncoderCustomPreset, EncoderSystemPreset.", "method to be used when validating client API access. 
:type", "value: list[~video_analyzer.models.VideoAnalyzer] \"\"\" _attribute_map = { 'value': {'key': 'value', 'type':", "super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None)", "kwargs.get('audio_encoder', None) self.video_encoder = kwargs.get('video_encoder', None) class NodeBase(msrest.serialization.Model): \"\"\"Base class", "{'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'video_name':", "None # type: Optional[str] class TlsEndpoint(EndpointBase): \"\"\"TLS endpoint describes an", "URL. This URL can be used in conjunction with the", "all claims and respective values for it to be valid.", "None # type: Optional[str] self.name = kwargs['name'] class ProcessorNodeBase(NodeBase): \"\"\"Base", "__init__( self, **kwargs ): super(PipelineJob, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None)", "valid. :type claims: list[~video_analyzer.models.TokenClaim] :param keys: List of keys which", "description: str :ivar state: Current state of the pipeline (read-only).", "is set to P30D (30 days), content older than 30", "account. Variables are only populated by the server, and will", "self.y = kwargs['y'] class EdgeModuleEntity(ProxyResource): \"\"\"The representation of an edge", "str \"\"\" _validation = { 'id': {'required': True}, 'status': {'readonly':", "{'key': 'value', 'type': '[VideoAnalyzer]'}, } def __init__( self, **kwargs ):", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[AccessPolicyEntity]'}, 'next_link':", "__init__( self, **kwargs ): super(NetworkAccessControl, self).__init__(**kwargs) self.integration = kwargs.get('integration', None)", "how the input content should be processed. 
:type preset: ~video_analyzer.models.EncoderPresetBase", "'lastModifiedAt', 'type': 'iso-8601'}, } def __init__( self, **kwargs ): super(SystemData,", "'type': 'str'}, 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def __init__(", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[VideoEntity]'}, 'next_link':", "{'readonly': True}, 'group_id': {'readonly': True}, 'required_members': {'readonly': True}, } _attribute_map", "characters long. :type title: str :param description: Optional description provided", "kHz). Allowed values are 96, 112, 128, 160, 192, 224,", "'@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key':", ":param default: The default value for the parameter to be", ":type bitrate_kbps: str :param frame_rate: The frame rate (in frames", "the video content authorization token on any compatible DASH or", "content_urls: Set of URLs to the video content. :vartype content_urls:", "class AccessPolicyEntityCollection(msrest.serialization.Model): \"\"\"A collection of AccessPolicyEntity items. :param value: A", "real-time ingestion, archiving and publishing of content for a unique", "encoder presets, which define the recipe or instructions on how", "None) self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None) self.preview_image_urls = kwargs.get('preview_image_urls', None) class", "all Azure Resource Manager resources. Variables are only populated by", ":type title: str :param description: Optional video description provided by", "self.password = kwargs['password'] class VideoAnalyzer(TrackedResource): \"\"\"The Video Analyzer account. Variables", "'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'preset': {'key': 'preset',", "link to the next page of the collection (when the", "\"\"\"Base type for all video encoding presets, which define the", "Known sub-classes are: JwtAuthentication. All required parameters must be populated", "type of identity that last modified the resource. 
Possible values", ":vartype source_mdm_namespace: str :ivar supported_time_grain_types: The supported time grain types.", "kwargs.get('next_link', None) class EdgeModuleProvisioningToken(msrest.serialization.Model): \"\"\"Provisioning token properties. A provisioning token", "last modified the resource. :type last_modified_by: str :param last_modified_by_type: The", "'type': 'str'}, 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, 'last_modified_at': {'key': 'lastModifiedAt',", "be ignored when sending a request. :ivar id: Fully qualified", "'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } _subtype_map = { 'type':", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'}", "None self.unit = None self.aggregation_type = None self.lock_aggregation_type = None", "= None class NetworkAccessControl(msrest.serialization.Model): \"\"\"Network access control for video analyzer", "class TlsValidationOptions(msrest.serialization.Model): \"\"\"Options for controlling the validation of TLS endpoints.", "available. :type name_available: bool :param reason: The reason why the", "'type': 'VideoPreviewImageUrls'}, } def __init__( self, **kwargs ): super(VideoContentUrls, self).__init__(**kwargs)", "class directly. Known sub-classes are: SecureIotDeviceRemoteTunnel. All required parameters must", "the topology parameter declarations. Parameters declared here can be referenced", "self.sku = kwargs['sku'] self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters',", "'timeSequences', 'type': 'TimeSequenceBase'}, } def __init__( self, **kwargs ): super(VideoSource,", "lives. :type location: str :param identity: The identities associated to", "message: str \"\"\" _attribute_map = { 'name_available': {'key': 'nameAvailable', 'type':", "256 characters long. 
:type title: str :param description: Optional video", "be applied when processing content for a particular outcome. The", "'[str]'}, 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'}, } def __init__( self,", "Video Analyzer can connect to the endpoint URL. This is", "the key id present on the JWT token header. :type", "metric dimensions. :vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates whether regional", "} _attribute_map = { 'client_id': {'key': 'clientId', 'type': 'str'}, 'principal_id':", "in Key Vault. Variables are only populated by the server,", "Analyzer IoT edge module. All required parameters must be populated", ":type sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List of the topology processor", "name of the pipeline job operation. :vartype name: str :ivar", "curve algorithm to be used: ES256, ES384 or ES512. Possible", "None) self.last_modified_by = kwargs.get('last_modified_by', None) self.last_modified_by_type = kwargs.get('last_modified_by_type', None) self.last_modified_at", "{ 'download_url': {'key': 'downloadUrl', 'type': 'str'}, 'archive_base_url': {'key': 'archiveBaseUrl', 'type':", "available. :type message: str \"\"\" _attribute_map = { 'name_available': {'key':", "def __init__( self, **kwargs ): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name = kwargs['name']", "of the topology processor nodes. Processor nodes enable pipeline data", "name: str :param inputs: Required. An array of upstream node", "{'key': 'systemData', 'type': 'SystemData'}, 'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'}, }", "filled by server. :type type: str :param audio_encoder: Describes a", "top level resource which has 'tags' and a 'location'. Variables", "description provided by the user. 
Value can be up to", "{ 'value': {'key': 'value', 'type': '[VideoEntity]'}, 'next_link': {'key': '@nextLink', 'type':", "Required. Operation identifier. :type name: str :param id: Operation resource", "'false'. If set to 'true', then \"disableArchive\" must be set", "~video_analyzer.models.Kind :param sku: Describes the properties of a SKU. :type", "= kwargs.get('value', None) class PrivateLinkResource(Resource): \"\"\"A private link resource. Variables", "Whether to export metric to shoebox. :vartype to_be_exported_for_shoebox: bool \"\"\"", ":param start_time: Operation start time. :type start_time: str :param end_time:", "If omitted, the encoder uses the resolution of the input", "new or existing video resource used to capture and publish", "str or ~video_analyzer.models.ActionType \"\"\" _validation = { 'name': {'required': True},", "_validation = { 'type': {'required': True}, 'name': {'required': True}, }", "\"\"\"The resource model definition for an Azure Resource Manager tracked", "\"CustomerKey\". :type type: str or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The properties", "True}, 'system_data': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map =", ":param error: The error object. :type error: ~video_analyzer.models.ErrorDetail \"\"\" _attribute_map", "to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param", "one 'noisy neighbor' does not affect other live pipelines in", "create the video resource can lead to errors when uploading", "None) self.actions_required = kwargs.get('actions_required', None) class Properties(msrest.serialization.Model): \"\"\"Metric properties. Variables", "'tags': {'key': 'tags', 'type': '{str}'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},", "info. :vartype info: any \"\"\" _validation = { 'type': {'readonly':", "can be references across the topology nodes. * Sources: list", "user. 
Value can be up to 2048 characters long. :type", "If omitted, the bitrate of the input audio is used.", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str self.name = kwargs['name']", "source which allows for content to be ingested from cameras.", "self).__init__(**kwargs) self.disable_archive = kwargs.get('disable_archive', None) self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None) class", "identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the storage", "order to send to Azure. :param name: Required. Name of", "~video_analyzer.models.Properties :param is_data_action: Whether the operation applies to data-plane. :type", "{ 'name': {'key': 'name', 'type': 'str'}, 'status': {'key': 'status', 'type':", "Parameter value to be applied on this specific pipeline. :type", "\"SingleLayer_540p_H264_AAC\", \"SingleLayer_720p_H264_AAC\", \"SingleLayer_1080p_H264_AAC\", \"SingleLayer_2160p_H264_AAC\". :type name: str or ~video_analyzer.models.EncoderSystemPresetType \"\"\"", "processor. All required parameters must be populated in order to", "'[NodeInput]'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'video_creation_properties': {'key': 'videoCreationProperties', 'type':", "of VideoAnalyzer items. :type value: list[~video_analyzer.models.VideoAnalyzer] \"\"\" _attribute_map = {", "_attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'user_assigned_identities': {'key':", "'clientId', 'type': 'str'}, 'principal_id': {'key': 'principalId', 'type': 'str'}, } def", ":type is_data_action: bool :param action_type: Indicates the action type. Possible", "**kwargs ): super(ProcessorNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str", "be up to 256 characters long. :type title: str :param", "be ingested from RTSP cameras through live pipelines or can", "resource. 
Possible values include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\". :type last_modified_by_type:", "None # type: Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model): \"\"\"The check availability request", "'str'}, } def __init__( self, **kwargs ): super(VideoMediaInfo, self).__init__(**kwargs) self.segment_length", "= { 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} } def __init__(", "'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key': 'dimensions',", "self.properties = kwargs.get('properties', None) self.is_data_action = kwargs.get('is_data_action', None) self.action_type =", ":ivar metric_specifications: List of metric specifications. :vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] \"\"\"", "and connected to the Internet prior to the token expiration", "{'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ):", "are interleaved in the HTTP connections alongside the RTSP messages.", "ProxyResource(Resource): \"\"\"The resource model definition for a Azure Resource Manager", "is 'archive' and preview images are enabled. :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls", "to over TLS transport (data is encrypted in transit). All", "'EncoderProcessor'} } def __init__( self, **kwargs ): super(ProcessorNodeBase, self).__init__(**kwargs) self.type", "EdgeModuleEntity items. :type value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A link to", "= kwargs['url'] self.tunnel = kwargs.get('tunnel', None) class ErrorAdditionalInfo(msrest.serialization.Model): \"\"\"The resource", "topology to be described here. 
:type description: str :param parameters:", "= kwargs.get('network_access_control', None) self.provisioning_state = None self.private_endpoint_connections = None class", "~video_analyzer.models.VideoPublishingOptions \"\"\" _validation = { 'type': {'required': True}, 'name': {'required':", "seconds. This property is only allowed for topologies where \"kind\"", "'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key':", "} def __init__( self, **kwargs ): super(VideoFlags, self).__init__(**kwargs) self.can_stream =", "type: str or ~video_analyzer.models.ParameterType :param description: Description of the parameter.", "'additional_info': {'readonly': True}, } _attribute_map = { 'code': {'key': 'code',", "{'readonly': True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map = { 'tags':", "MIT License. See License.txt in the project root for license", "the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List of the", "managed identity that Video Analyzer will use to access the", "lost if the code is regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions", ":type download_url: str :param archive_base_url: Video archive streaming base URL.", "initial call to create the video resource can lead to", "reference a key without a version (for example https://vault/keys/mykey). :type", "in 30 seconds increments. Changing this value after the initial", "None) self.frame_rate = kwargs.get('frame_rate', None) self.scale = kwargs.get('scale', None) class", "must be set to 'false'. :type disable_rtsp_publishing: str \"\"\" _attribute_map", "None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): \"\"\"Status of private endpoint connection operation. All", "\"Pad\", \"PreserveAspectRatio\", \"Stretch\". 
:type mode: str or ~video_analyzer.models.VideoScaleMode \"\"\" _attribute_map", "packets are interleaved on the TCP RTSP connection. When using", "the reserved capacity. Doing so will ensure that one 'noisy", "Required. The IoT Hub identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status:", "{'key': 'properties.bitrateKbps', 'type': 'int'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'parameters':", "request. :ivar client_id: The client ID. :vartype client_id: str :ivar", "'type': 'bool'}, 'reason': {'key': 'reason', 'type': 'str'}, 'message': {'key': 'message',", "for content to be ingested from cameras. * Processors: list", "super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None)", "response for all Azure Resource Manager APIs to return error", "IoT device id to use when establishing the remote tunnel.", "an endpoint that the pipeline can connect to over clear", "length of individual content files (segments) which are persisted to", "a custom preset for encoding the input content using the", "sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All required parameters must be", "'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'}, 'state': {'key':", "None) self.last_modified_at = kwargs.get('last_modified_at', None) class TimeSequenceBase(msrest.serialization.Model): \"\"\"A sequence of", "{'key': 'certificates', 'type': '[str]'}, } def __init__( self, **kwargs ):", "prior to the token expiration date. :vartype expiration_date: ~datetime.datetime :ivar", "= kwargs.get('description', None) class ParameterDeclaration(msrest.serialization.Model): \"\"\"Single topology parameter declaration. Declared", "be presented as part of the credentials. :type username: str", "error message. :type message: str \"\"\" _attribute_map = { 'code':", "error target. 
:vartype target: str :ivar details: The error details.", "of keys which can be used to validate access tokens.", "properties. Variables are only populated by the server, and will", "of days, up to a maximum of 10 years. For", "'expirationDate', 'type': 'iso-8601'}, } def __init__( self, **kwargs ): super(ListProvisioningTokenInput,", "The type of identity that last modified the resource. Possible", "supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types: list[str] :ivar dimensions: The", "\"\"\" _validation = { 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True},", "of the topology to be described here. :type description: str", "'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def __init__( self,", "'value': {'key': 'value', 'type': '[PipelineJob]'}, 'next_link': {'key': '@nextLink', 'type': 'str'},", "} def __init__( self, **kwargs ): super(VideoSink, self).__init__(**kwargs) self.type =", "self.type = None # type: Optional[str] self.kid = kwargs['kid'] class", "publish content. Note: if downstream of RTSP source, and if", "'download_url': {'key': 'downloadUrl', 'type': 'str'}, 'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'},", "be auto-rotated as long as the module is able to", "'name': {'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type': 'str'},", "'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs", "request. :ivar id: Fully qualified resource ID for the resource.", "{'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation':", "if the resource name is available. 
:type name_available: bool :param", ":type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The provisioning state of the", "str \"\"\" _attribute_map = { 'small': {'key': 'small', 'type': 'str'},", "'type': '[str]'}, } def __init__( self, **kwargs ): super(MetricSpecification, self).__init__(**kwargs)", ":vartype system_data: ~video_analyzer.models.SystemData :param kind: Required. Topology kind. Possible values", "applied across all the cameras. Individual instance properties can be", "type: str :param issuers: List of expected token issuers. Token", "All rights reserved. # Licensed under the MIT License. See", "will be ignored when sending a request. All required parameters", "= kwargs['alg'] self.x = kwargs['x'] self.y = kwargs['y'] class EdgeModuleEntity(ProxyResource):", "None) class TimeSequenceBase(msrest.serialization.Model): \"\"\"A sequence of datetime ranges as a", "the associated storage account. Variables are only populated by the", "the connection. :type description: str :param actions_required: A message indicating", "to 'false'. :type disable_archive: str :param disable_rtsp_publishing: When set to", "played in \"live mode\" with latencies which are approximately double", ":type validation_options: ~video_analyzer.models.TlsValidationOptions \"\"\" _validation = { 'type': {'required': True},", "'str'}, 'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'}, 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type':", "A collection of PipelineJob items. :type value: list[~video_analyzer.models.PipelineJob] :param next_link:", "additional info type. :vartype type: str :ivar info: The additional", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Required. Topology kind.", ":param value: A collection of PipelineJob items. 
:type value: list[~video_analyzer.models.PipelineJob]", "kwargs.get('end_time', None) self.status = kwargs.get('status', None) self.error = kwargs.get('error', None)", "provided to the Azure Video Analyzer IoT edge module through", "{ 'key_identifier': {'required': True}, 'current_key_identifier': {'readonly': True}, } _attribute_map =", "IoT edge module. All required parameters must be populated in", "= kwargs['sku'] self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters', None)", "'type': 'TimeSequenceBase'}, } def __init__( self, **kwargs ): super(VideoSource, self).__init__(**kwargs)", "'[str]'}, 'claims': {'key': 'claims', 'type': '[TokenClaim]'}, 'keys': {'key': 'keys', 'type':", "} def __init__( self, **kwargs ): super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name =", "'n': {'required': True}, 'e': {'required': True}, } _attribute_map = {", "or ~video_analyzer.models.SkuName :ivar tier: The SKU tier. Possible values include:", "topology definition. :type topology_name: str :param description: An optional description", "allow for seamless key rotation of the token signing key.", "None # type: Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase): \"\"\"A remote tunnel securely", "'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, 'frame_rate': {'key': 'frameRate',", "Code Generator. # Changes may cause incorrect behavior and will", "= kwargs.get('next_link', None) class AccountEncryption(msrest.serialization.Model): \"\"\"Defines how the Video Analyzer", "expiration date. :vartype expiration_date: ~datetime.datetime :ivar token: The token blob", "self, **kwargs ): super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name = None self.status =", "the topology and can optionally have default values to be", "RSA algorithm to be used: RS256, RS384 or RS512. 
Possible", "= kwargs.get('created_by', None) self.created_by_type = kwargs.get('created_by_type', None) self.created_at = kwargs.get('created_at',", "content authorization token to download the video MP4 file. The", "value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A link to the next page", "super(NetworkAccessControl, self).__init__(**kwargs) self.integration = kwargs.get('integration', None) self.ingestion = kwargs.get('ingestion', None)", "{ 'service_specification': {'readonly': True}, } _attribute_map = { 'service_specification': {'key':", "processor nodes. Processor nodes enable pipeline data to be analyzed,", "archival properties. :type archival: ~video_analyzer.models.VideoArchival \"\"\" _validation = { 'id':", "referenced, doesn't necessarily indicate that data is being received. For", ":param time_sequences: Required. Describes a sequence of datetime ranges. The", "self.scale = kwargs.get('scale', None) class VideoEncoderH264(VideoEncoderBase): \"\"\"A custom preset for", "display description. :vartype display_description: str :ivar unit: The metric unit.", "error: The error details for the live pipeline operation. :vartype", "to be parameterized. This allows individual pipelines refer to different", ":ivar status: The status of the live pipeline operation. :vartype", ":param preview_image_urls: Video preview image URLs. These URLs can be", "the topology nodes through the use of \"${PARAMETER_NAME}\" string pattern.", "__init__( self, **kwargs ): super(TokenKey, self).__init__(**kwargs) self.type = None #", "type of key used to encrypt the Account Key. Possible", "'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'},", "topology, used for offline processing of selected portions of archived", "'type': {'required': True}, 'username': {'required': True}, 'password': {'required': True}, }", "duration format (i.e. 
\"PT30S\" equals 30 seconds) and can vary", "def __init__( self, **kwargs ): super(EncoderPresetBase, self).__init__(**kwargs) self.type = None", "single video. :vartype token: str \"\"\" _validation = { 'expiration_date':", "content using the encoder processor. All required parameters must be", "self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str self.video_name = kwargs['video_name'] self.video_creation_properties", "video description provided by the user. Value can be up", "def __init__( self, **kwargs ): super(OperationCollection, self).__init__(**kwargs) self.value = kwargs.get('value',", "super(PipelineJobCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None)", "datetime ranges. The video source only picks up recorded media", "type. Possible values include: \"Average\", \"Count\", \"Total\". :vartype aggregation_type: str", "self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class PipelineTopologyUpdate(ProxyResource):", "'EndpointBase'}, } def __init__( self, **kwargs ): super(RtspSource, self).__init__(**kwargs) self.type", "'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},", ":type last_modified_by: str :param last_modified_by_type: The type of identity that", "= { 'id': {'key': 'id', 'type': 'str'}, 'identity': {'key': 'identity',", "super(ParameterDefinition, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs.get('value', None) class", "The diagnostic log category display name. :vartype display_name: str :ivar", "the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List of the", ":param certificates: Required. PEM formatted public certificates. One certificate per", "the input video should be processed. 
You probably want to", "} _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'user_assigned_identities':", "'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, 'trusted_certificates': {'key': 'trustedCertificates', 'type':", "'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type':", "metric_specifications: list[~video_analyzer.models.MetricSpecification] \"\"\" _validation = { 'log_specifications': {'readonly': True}, 'metric_specifications':", "Possible values include: \"Processing\", \"Canceled\", \"Completed\", \"Failed\". :vartype state: str", "'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnection,", "= { 'status': {'key': 'status', 'type': 'str'}, 'description': {'key': 'description',", "self).__init__(**kwargs) self.role = kwargs.get('role', None) self.authentication = kwargs.get('authentication', None) class", "self.height = kwargs.get('height', None) self.width = kwargs.get('width', None) self.mode =", "the Video Analyzer account is (optionally) encrypted. Variables are only", "this capacity, then the service will disconnect temporarily from the", "super(VideoArchival, self).__init__(**kwargs) self.retention_period = kwargs.get('retention_period', None) class VideoContentToken(msrest.serialization.Model): \"\"\"\"Video content", "30 seconds increments. :type segment_length: str \"\"\" _attribute_map = {", "of datetime ranges. The video source only picks up recorded", "self.preset = kwargs['preset'] class EncoderSystemPreset(EncoderPresetBase): \"\"\"Describes a built-in preset for", "present on the token. :type value: str \"\"\" _validation =", "str \"\"\" _validation = { 'type': {'required': True}, 'username': {'required':", "ignored when sending a request. :ivar name: The metric dimension", "sku: Required. Describes the properties of a SKU. 
:type sku:", "assigned managed identity's resource identifier to use when accessing a", "self.private_endpoint = kwargs.get('private_endpoint', None) self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None) self.provisioning_state =", "True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True},", "self).__init__(**kwargs) self.type = None self.info = None class ErrorDetail(msrest.serialization.Model): \"\"\"The", "kwargs.get('value', None) class PemCertificateList(CertificateSource): \"\"\"A list of PEM formatted certificates.", "kwargs.get('required_zone_names', None) class PrivateLinkResourceListResult(msrest.serialization.Model): \"\"\"A list of private link resources.", "ranges. :type time_sequences: ~video_analyzer.models.TimeSequenceBase \"\"\" _validation = { 'type': {'required':", "containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags:", "trusted certificate authorities when authenticating a TLS connection. A null", "the cloud. After the initial handshake, the IoT edge module", "'displayName', 'type': 'str'}, 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'}, } def", "_attribute_map = { 'value': {'key': 'value', 'type': '[LivePipeline]'}, 'next_link': {'key':", "if it matches at least one of the given values.", "'SystemData'}, } def __init__( self, **kwargs ): super(ProxyResource, self).__init__(**kwargs) class", "the pipeline (read-only). 
Possible values include: \"Processing\", \"Canceled\", \"Completed\", \"Failed\".", "'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__(", "'str'}, } def __init__( self, **kwargs ): super(NodeInput, self).__init__(**kwargs) self.node_name", "'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, } def __init__( self, **kwargs", "lived HTTP connections, and the RTP packages are interleaved in", "resource. :type created_by: str :param created_by_type: The type of identity", "Analyzer Player Widget or compatible players. Exported videos can be", "token to expose a WebSocket tunneled RTSP stream. It is", "values include: \"SystemKey\", \"CustomerKey\". :type type: str or ~video_analyzer.models.AccountEncryptionKeyType :param", "None) self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints = None self.encryption =", "input content should be processed. :type preset: ~video_analyzer.models.EncoderPresetBase \"\"\" _validation", "'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} } def __init__( self, **kwargs ): super(CertificateSource,", "types.Constant filled by server. :type type: str :param kid: Required.", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} }", "of the storage account resource. Video Analyzer relies on tables,", "be ignored when sending a request. :ivar name: The diagnostic", "'str'}, } def __init__( self, **kwargs ): super(ParameterDefinition, self).__init__(**kwargs) self.name", "If the mode is 'Pad' or 'Stretch' then both width", "subject name validation to be skipped. Default is 'false'. :type", "SKU. :type sku: ~video_analyzer.models.Sku :param description: An optional description of", "TCP or HTTP. 
When using TCP, the RTP packets are", "using TCP, the RTP packets are interleaved on the TCP", "MP4 file can be played on any standard media player.", "'VideoCreationProperties'}, 'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'}, } def __init__( self,", "fact that is being referenced, doesn't necessarily indicate that data", "~video_analyzer.models.TlsValidationOptions \"\"\" _validation = { 'type': {'required': True}, 'credentials': {'required':", "to be skipped. Default is 'false'. :type ignore_hostname: str :param", "self.description = kwargs.get('description', None) class ParameterDeclaration(msrest.serialization.Model): \"\"\"Single topology parameter declaration.", "_attribute_map = { 'small': {'key': 'small', 'type': 'str'}, 'medium': {'key':", ":vartype token: str \"\"\" _validation = { 'expiration_date': {'readonly': True},", "archival properties. :param retention_period: Video retention period indicates the maximum", "None) self.video_encoder = kwargs.get('video_encoder', None) class NodeBase(msrest.serialization.Model): \"\"\"Base class for", "{'key': 'properties.role', 'type': 'str'}, 'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'}, }", "ongoing video recording can be played in \"live mode\" with", "'systemData', 'type': 'SystemData'}, } def __init__( self, **kwargs ): super(ProxyResource,", "= None self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) self.dimensions = None self.enable_regional_mdm_account", "and will be ignored when sending a request. :ivar log_specifications:", "TrackedResource(Resource): \"\"\"The resource model definition for an Azure Resource Manager", "__init__( self, **kwargs ): super(Properties, self).__init__(**kwargs) self.service_specification = None class", "self.name = None self.type = None self.system_data = None class", "{'required': True}, 'type': {'required': True}, } _attribute_map = { 'name':", "directly. 
Known sub-classes are: EncoderCustomPreset, EncoderSystemPreset. All required parameters must", "resource type. :type type: str \"\"\" _attribute_map = { 'name':", "Token audience is valid if it matches at least one", ":param width: The desired output video width. :type width: str", "accessing a resource. All required parameters must be populated in", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'} }", "self.action_type = kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model): \"\"\"A collection of Operation", "'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnection, self).__init__(**kwargs)", "topology processor nodes. You probably want to use the sub-classes", "kwargs.get('scale', None) class VideoEncoderH264(VideoEncoderBase): \"\"\"A custom preset for encoding video", "of AccessPolicyEntity items. :type value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A link", "= None class UsernamePasswordCredentials(CredentialsBase): \"\"\"Username and password credentials. All required", "self.description = kwargs.get('description', None) self.default = kwargs.get('default', None) class ParameterDefinition(msrest.serialization.Model):", ":ivar expiration: The date-time by when this pipeline job will", "'identity': {'required': True}, 'status': {'readonly': True}, } _attribute_map = {", "recording can be played in \"live mode\" with latencies which", "kwargs.get('bitrate_kbps', None) self.state = None self.parameters = kwargs.get('parameters', None) class", "values include: \"Invalid\", \"AlreadyExists\". :type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason :param", "live pipeline operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = {", "used to capture and publish content. Note: if downstream of", "additional info. 
:vartype info: any \"\"\" _validation = { 'type':", "edge module to be initialized and authorized to the cloud", "as the same processing is to be applied across all", "the resource lives. :type location: str \"\"\" _validation = {", "in conjunction with the video content authorization token on any", "{ 'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'}, 'ingestion': {'key': 'ingestion', 'type':", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Required. Topology", "metric dimension. Variables are only populated by the server, and", "'metricSpecifications', 'type': '[MetricSpecification]'}, } def __init__( self, **kwargs ): super(ServiceSpecification,", "'videoCreationProperties', 'type': 'VideoCreationProperties'}, 'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'}, } def", "derived types.Constant filled by server. :type type: str :param certificates:", "the video type is 'archive' and video archiving is enabled.", "'type': 'GroupLevelAccessControl'}, 'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'}, 'consumption': {'key': 'consumption',", "desired expiration date of the registration token. The Azure Video", "used as input of the current node. :type node_name: str", "server. :type type: str :param audio_encoder: Describes a custom preset", "True}, 'error': {'readonly': True}, } _attribute_map = { 'name': {'key':", "video is kept in storage. Value must be specified in", "None) self.description = kwargs.get('description', None) self.state = None self.expiration =", "super(VideoEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None)", "super(IotHub, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs['identity'] self.status =", "specific pipeline. 
:type value: str \"\"\" _validation = { 'name':", "key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity: The Key Vault identity. :type identity:", "for the same IoT edge module in case the module", "cameras and/or generic RTSP servers. :type endpoint: ~video_analyzer.models.EndpointBase \"\"\" _validation", "super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications = None self.metric_specifications = None class SinkNodeBase(NodeBase):", "\"Count\", \"Total\". :vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The", "the topology sink nodes. Sink nodes allow pipeline data to", "objects. You probably want to use the sub-classes and not", ":vartype id: str :ivar name: The name of the resource.", "__init__( self, **kwargs ): super(Sku, self).__init__(**kwargs) self.name = kwargs['name'] self.tier", "self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates = kwargs.get('trusted_certificates', None)", "{'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} } def __init__( self, **kwargs ):", "str :ivar required_members: The private link resource required member names.", "types.Constant filled by server. :type type: str :param ranges: Required.", "class for access policies authentication methods. You probably want to", "= None self.type = None self.system_data = None class ProxyResource(Resource):", "~video_analyzer.models.TunnelBase \"\"\" _validation = { 'type': {'required': True}, 'credentials': {'required':", "connections alongside the RTSP messages. Possible values include: \"Http\", \"Tcp\".", "enable_regional_mdm_account: Indicates whether regional MDM account is enabled. 
:vartype enable_regional_mdm_account:", "about the available video actions and its dynamic properties based", "__init__( self, **kwargs ): super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type = kwargs['type'] self.user_assigned_identities", "VideoSink(SinkNodeBase): \"\"\"Video sink in a live topology allows for video", "The metric unit. Possible values include: \"Bytes\", \"Count\", \"Milliseconds\". :vartype", "Video Analyzer's list of trusted authorities should be used. :type", "None) self.message = kwargs.get('message', None) class CredentialsBase(msrest.serialization.Model): \"\"\"Base class for", ":param retention_period: Video retention period indicates the maximum age of", "applications and scenarios. Possible values include: \"Archive\", \"File\". :vartype type_properties_type:", "str or ~video_analyzer.models.LivePipelineState :param parameters: List of the instance level", ":type rtsp_tunnel_url: str :param preview_image_urls: Video preview image URLs. These", "= kwargs.get('height', None) self.width = kwargs.get('width', None) self.mode = kwargs.get('mode',", "when this pipeline job will be automatically deleted from your", "be captured, optionally archived, and published via a video resource.", "None) self.segment_length = kwargs.get('segment_length', None) self.retention_period = kwargs.get('retention_period', None) class", "'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} } def __init__( self, **kwargs ): super(SinkNodeBase,", "def __init__( self, **kwargs ): super(UserAssignedManagedIdentity, self).__init__(**kwargs) self.client_id = None", "more data sources nodes such as an RTSP source which", "width and height must be specified. Else if the mode", "str :param status: Operation status. :type status: str :param error:", "on the service. These will not take effect if the", "IotHub(msrest.serialization.Model): \"\"\"The IoT Hub details. Variables are only populated by", "enabled. 
:type archive_base_url: str :param rtsp_tunnel_url: Video low-latency streaming URL.", "} def __init__( self, **kwargs ): super(Sku, self).__init__(**kwargs) self.name =", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} } def __init__(", "tunnel. This string is case-sensitive. :type device_id: str \"\"\" _validation", "token on any compatible DASH or HLS players by appending", "True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},", "\"\"\"Base class for tunnel objects. You probably want to use", "of expected token audiences. Token audience is valid if it", "can be used to validate access tokens. Having multiple keys", "\"Approved\", \"Rejected\". :type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The", "with the H.264 (AVC) codec. All required parameters must be", "\"\"\"Group level network access control. :param public_network_access: Whether or not", "'dimensions': {'readonly': True}, 'enable_regional_mdm_account': {'readonly': True}, 'source_mdm_account': {'readonly': True}, 'source_mdm_namespace':", "\"\"\"A sequence of datetime ranges as a string. You probably", "or compatible players. Exported videos can be downloaded as MP4", "class directly. Known sub-classes are: AudioEncoderAac. All required parameters must", "be used on a pipeline node. All required parameters must", "but generate larger volume of storage transactions. Larger segments reduce", "= '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str class AuthenticationBase(msrest.serialization.Model): \"\"\"Base class for", "widget. Alternatively, this URL can be used in conjunction with", "name of the live pipeline operation. 
:vartype name: str :ivar", "{'key': 'info', 'type': 'object'}, } def __init__( self, **kwargs ):", "None) self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None) class VideoScale(msrest.serialization.Model): \"\"\"The video scaling", "of the live pipeline operation. :vartype name: str :ivar status:", "'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'identity': {'key': 'identity',", "{'key': 'alg', 'type': 'str'}, 'n': {'key': 'n', 'type': 'str'}, 'e':", ":type iot_hub_name: str :param device_id: Required. The IoT device id", "and password credentials. All required parameters must be populated in", "list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A link to the next page of", "{'key': 'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'}, }", "given values. :type audiences: list[str] :param claims: List of additional", "associated to the Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param", "super(Sku, self).__init__(**kwargs) self.name = kwargs['name'] self.tier = None class StorageAccount(msrest.serialization.Model):", "{ 'type': {'required': True}, 'name': {'required': True}, 'video_name': {'required': True},", "~video_analyzer.models.KeyVaultProperties :param identity: The Key Vault identity. :type identity: ~video_analyzer.models.ResourceIdentity", "kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints = None self.encryption", "'type': 'str'}, 'info': {'key': 'info', 'type': 'object'}, } def __init__(", "\"Rejected\". 
:type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The reason", "super(VideoPreviewImageUrls, self).__init__(**kwargs) self.small = kwargs.get('small', None) self.medium = kwargs.get('medium', None)", "'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'},", "kwargs['video_name'] self.video_creation_properties = kwargs.get('video_creation_properties', None) self.video_publishing_options = kwargs.get('video_publishing_options', None) class", "requests in each blob. :vartype blob_duration: str \"\"\" _validation =", "name: str :ivar display_name: The display name for the dimension.", "EncoderSystemPreset(EncoderPresetBase): \"\"\"Describes a built-in preset for encoding the input content", "to 10 years, in 1 day increments. When absent (null),", "PemCertificateList(CertificateSource): \"\"\"A list of PEM formatted certificates. All required parameters", "{'key': 'ranges', 'type': 'str'}, } def __init__( self, **kwargs ):", ":param node_name: Required. The name of the upstream node in", "a unique RTSP camera. Variables are only populated by the", "Operation properties format. :type properties: ~video_analyzer.models.Properties :param is_data_action: Whether the", "self).__init__(**kwargs) self.code = kwargs.get('code', None) self.message = kwargs.get('message', None) class", "or HTTP. When using TCP, the RTP packets are interleaved", "private endpoint connection operation. All required parameters must be populated", "'false'. :type ignore_signature: str \"\"\" _attribute_map = { 'ignore_hostname': {'key':", "for the \"token\" query string parameter. The token is specific", "input content using the encoder processor. All required parameters must", "): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type = None self.info = None class", "about the error for a failed pipeline job. :param code:", "is used only for low latency video streaming. 
Default is", "Default is 'false'. :type ignore_signature: str \"\"\" _attribute_map = {", "Known sub-classes are: VideoEncoderH264. All required parameters must be populated", "'consumption', 'type': 'GroupLevelAccessControl'}, } def __init__( self, **kwargs ): super(NetworkAccessControl,", "ignored when sending a request. :ivar id: The ARM identifier", "Analyzer video resource to be ingested into a pipeline. Currently", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__( self,", "super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkResource(Resource): \"\"\"A private", ":type type: str :param iot_hub_name: Required. Name of the IoT", "day) and can vary between 1 day to 10 years,", "{'key': 'videoEncoder', 'type': 'VideoEncoderBase'}, } def __init__( self, **kwargs ):", "'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs", "'str'}, 'issuers': {'key': 'issuers', 'type': '[str]'}, 'audiences': {'key': 'audiences', 'type':", "'[MetricSpecification]'}, } def __init__( self, **kwargs ): super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications", "VideoAnalyzer items. :param value: A collection of VideoAnalyzer items. :type", "'type': 'str'}, } def __init__( self, **kwargs ): super(TlsValidationOptions, self).__init__(**kwargs)", "already exists. :param title: Optional title provided by the user.", "True}, 'type_properties_type': {'readonly': True}, 'flags': {'readonly': True}, 'content_urls': {'readonly': True},", "the resource. :type last_modified_by: str :param last_modified_by_type: The type of", ":type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo :param n: Required. RSA public", "instructions on how audio should be processed. 
You probably want", "def __init__( self, **kwargs ): super(VideoScale, self).__init__(**kwargs) self.height = kwargs.get('height',", "topology parameter. See pipeline topology parameters for more information. All", "set to \"live\". :type retention_period: str \"\"\" _attribute_map = {", "and 256. If omitted, the bitrate of the input audio", "__init__( self, **kwargs ): super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id = None class", "private link resource required member names. :vartype required_members: list[str] :param", "Required. Describes a sequence of datetime ranges. The video source", "'type': 'str'}, 'username': {'key': 'username', 'type': 'str'}, 'password': {'key': 'password',", "'tunnel', 'type': 'TunnelBase'}, } def __init__( self, **kwargs ): super(UnsecuredEndpoint,", "is available when the video type is 'archive' and video", "= kwargs['id'] self.identity = kwargs.get('identity', None) self.status = None class", "AudioEncoderAac(AudioEncoderBase): \"\"\"A custom preset for encoding audio with the AAC", "display name. :vartype display_name: str :ivar blob_duration: The time range", "Azure. :param id: Required. The IoT Hub resource identifier. :type", "For example, it can used to change the resolution from", "self.description = kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state =", "\"SingleLayer_2160p_H264_AAC\". :type name: str or ~video_analyzer.models.EncoderSystemPresetType \"\"\" _validation = {", "'type': 'iso-8601'}, 'error': {'key': 'properties.error', 'type': 'PipelineJobError'}, 'parameters': {'key': 'properties.parameters',", "or ~video_analyzer.models.AccessPolicyRole :param authentication: Authentication method to be used when", "content token grants access to the video content URLs.\". Variables", "seconds) and can vary between 30 seconds to 5 minutes,", "status of the storage account mapping. 
:vartype status: str \"\"\"", "class RsaTokenKey(TokenKey): \"\"\"Required validation properties for tokens generated with RSA", "~video_analyzer.models.CertificateSource :param validation_options: Validation options to use when authenticating a", "self.provisioning_state = None self.private_endpoint_connections = None class VideoAnalyzerCollection(msrest.serialization.Model): \"\"\"A collection", "content via the video resource. This property is only allowed", "'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, }", "{ 'tags': {'key': 'tags', 'type': '{str}'}, 'identity': {'key': 'identity', 'type':", "'title': {'key': 'title', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'},", "'name': {'required': True}, 'inputs': {'required': True}, 'video_name': {'required': True}, }", "= kwargs.get('medium', None) self.large = kwargs.get('large', None) class VideoPublishingOptions(msrest.serialization.Model): \"\"\"Optional", "True}, 'system_data': {'readonly': True}, 'location': {'required': True}, } _attribute_map =", "this class directly. Known sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All", "type. :vartype type: str :ivar info: The additional info. :vartype", "SKU tier. Possible values include: \"Standard\". :vartype tier: str or", "} def __init__( self, **kwargs ): super(AuthenticationBase, self).__init__(**kwargs) self.type =", "'default': {'key': 'default', 'type': 'str'}, } def __init__( self, **kwargs", "pertaining to creation and last modification of the resource. :param", "resolution of the input video. :type scale: ~video_analyzer.models.VideoScale \"\"\" _validation", "the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint", "information. :vartype system_data: ~video_analyzer.models.SystemData :ivar group_id: The private link resource", "metadata containing createdBy and modifiedBy information. 
:vartype system_data: ~video_analyzer.models.SystemData \"\"\"", "send to Azure. :param name: Required. Name of the parameter.", "kwargs.get('frame_rate', None) self.scale = kwargs.get('scale', None) class VideoEncoderH264(VideoEncoderBase): \"\"\"A custom", "super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name = None self.status = None self.error =", "inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param video_name: Required.", "availability result. :param name_available: Indicates if the resource name is", "True}, 'certificates': {'required': True}, } _attribute_map = { 'type': {'key':", "def __init__( self, **kwargs ): super(VideoMediaInfo, self).__init__(**kwargs) self.segment_length = kwargs.get('segment_length',", "_validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs':", "is available when the video type is 'file' and video", "have default values to be used when they are not", "'location', 'type': 'str'}, } def __init__( self, **kwargs ): super(TrackedResource,", "'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly': True}, 'enable_regional_mdm_account': {'readonly': True}, 'source_mdm_account':", "'downloadUrl', 'type': 'str'}, 'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'}, 'rtsp_tunnel_url': {'key':", "under the Video Analyzer account. Possible values include: \"Enabled\", \"Disabled\".", "access the storage account. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The", "H.264 (AVC) codec. All required parameters must be populated in", "__init__( self, **kwargs ): super(CertificateSource, self).__init__(**kwargs) self.type = None #", "of identity that created the resource. 
Possible values include: \"User\",", "'user_assigned_identity': {'required': True}, } _attribute_map = { 'user_assigned_identity': {'key': 'userAssignedIdentity',", "super(VideoFlags, self).__init__(**kwargs) self.can_stream = kwargs['can_stream'] self.has_data = kwargs['has_data'] self.is_in_use =", "image URL. :type large: str \"\"\" _attribute_map = { 'small':", "the endpoint. Possible values include: \"ClientApi\". :type type: str or", "URL of the endpoint. :type endpoint_url: str :param type: Required.", "'Sku'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type':", ":param validation_options: Validation options to use when authenticating a TLS", "private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A collection of information about the", "one response). :type next_link: str \"\"\" _attribute_map = { 'value':", "'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'title': {'key': 'properties.title', 'type': 'str'},", "{'key': 'isDataAction', 'type': 'bool'}, 'action_type': {'key': 'actionType', 'type': 'str'}, }", "encoding video. :type video_encoder: ~video_analyzer.models.VideoEncoderBase \"\"\" _validation = { 'type':", "self.actions_required = kwargs.get('actions_required', None) class Properties(msrest.serialization.Model): \"\"\"Metric properties. Variables are", "the account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity: The Key Vault", "} def __init__( self, **kwargs ): super(NodeBase, self).__init__(**kwargs) self.type =", "topology. When activated, this pipeline job will process content according", "\"User\", \"Application\", \"ManagedIdentity\", \"Key\". 
:type created_by_type: str or ~video_analyzer.models.CreatedByType :param", ":type is_in_use: bool \"\"\" _validation = { 'can_stream': {'required': True},", "by when this pipeline job will be automatically deleted from", "str or ~video_analyzer.models.AccessPolicyRole :param authentication: Authentication method to be used", "128, 160, 192, 224, and 256. If omitted, the bitrate", "class ResourceIdentity(msrest.serialization.Model): \"\"\"The user assigned managed identity to use when", ":param type: The resource type. :type type: str \"\"\" _attribute_map", "Known sub-classes are: RtspSource, VideoSource. All required parameters must be", "{'key': 'audioEncoder', 'type': 'AudioEncoderBase'}, 'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'}, }", "} def __init__( self, **kwargs ): super(EncoderCustomPreset, self).__init__(**kwargs) self.type =", "allowed for topologies where \"kind\" is set to \"live\". :type", "of the service. Possible values include: \"Pending\", \"Approved\", \"Rejected\". :type", "only populated by the server, and will be ignored when", "'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints',", "be up to 2048 characters long. :type description: str :param", "def __init__( self, **kwargs ): super(SourceNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase'", "None) self.audiences = kwargs.get('audiences', None) self.claims = kwargs.get('claims', None) self.keys", "URL. This is an optional property, typically used when the", "= kwargs['kid'] class EccTokenKey(TokenKey): \"\"\"Required validation properties for tokens generated", "will not have tags and a location. Variables are only", "bitrate_kbps: str \"\"\" _validation = { 'type': {'required': True}, }", "level parameter values for the user-defined topology parameters. 
A pipeline", "name: str :param display: The operation display name. :type display:", "token claims to be validated. Token must contains all claims", "PipelineJobUpdate(ProxyResource): \"\"\"Pipeline job represents a unique instance of a batch", "nodes. Processor nodes enable pipeline data to be analyzed, processed", "'type': '[TokenClaim]'}, 'keys': {'key': 'keys', 'type': '[TokenKey]'}, } def __init__(", "instance properties can be defined through the use of user-defined", "The reason why the given name is not available. Possible", "self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class PipelineJobError(msrest.serialization.Model):", "Required. The user assigned managed identity's resource identifier to use", "= { 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} } def __init__( self, **kwargs", "between 30 seconds to 5 minutes, in 30 seconds increments.", "to Azure. :param expiration_date: Required. The desired expiration date of", "\"\"\" _validation = { 'node_name': {'required': True}, } _attribute_map =", "are: SecureIotDeviceRemoteTunnel. All required parameters must be populated in order", "'str'}, } def __init__( self, **kwargs ): super(TokenClaim, self).__init__(**kwargs) self.name", "pipeline topology. It is recommended that the expected use of", "or reference a key without a version (for example https://vault/keys/mykey).", "token allows for a single instance of Azure Video analyzer", "is allowed for specified resources under the Video Analyzer account.", "when sending a request. :ivar id: Fully qualified resource ID", "): super(VideoEntity, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description = kwargs.get('description',", "class VideoEntityCollection(msrest.serialization.Model): \"\"\"A collection of VideoEntity items. :param value: A", "a request. :ivar name: The metric dimension name. :vartype name:", "Required. X coordinate. :type x: str :param y: Required. 
Y", "'@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntityCollection,", "automatically deleted from your account. :vartype expiration: ~datetime.datetime :ivar error:", "applied. Default mode is 'Pad'. If the mode is 'Pad'", "Azure Resource Manager tracked top level resource which has 'tags'", "details. :vartype details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The error additional info.", "'str'}, 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'}, } def __init__( self,", "given name is not available. Possible values include: \"Invalid\", \"AlreadyExists\".", "from the source. :type rtsp_tunnel_url: str :param preview_image_urls: Video preview", "in your account. :type bitrate_kbps: int :ivar state: Current state", "def __init__( self, **kwargs ): super(IotHub, self).__init__(**kwargs) self.id = kwargs['id']", "~datetime.datetime \"\"\" _attribute_map = { 'created_by': {'key': 'createdBy', 'type': 'str'},", "'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control': {'key':", "video resource can lead to errors when uploading content to", "Azure Video Analyzer IoT edge module must be initialized and", "'start_time': {'key': 'startTime', 'type': 'str'}, 'end_time': {'key': 'endTime', 'type': 'str'},", "'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } def __init__( self, **kwargs ): super(VideoAnalyzerIdentity,", "'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'}, 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},", "video scaling mode to be applied. Default mode is 'Pad'.", "PipelineTopology items. :param value: A collection of PipelineTopology items. :type", "source_mdm_namespace: The source MDM namespace. :vartype source_mdm_namespace: str :ivar supported_time_grain_types:", "filled by server. 
:type type: str :param bitrate_kbps: The maximum", "Private Endpoint Connections created under Video Analyzer account. :vartype private_endpoint_connections:", "kwargs['kid'] class EccTokenKey(TokenKey): \"\"\"Required validation properties for tokens generated with", "kwargs.get('next_link', None) class PipelineTopologyUpdate(ProxyResource): \"\"\"Pipeline topology describes the processing steps", "Token signature must match exactly one key. :type keys: list[~video_analyzer.models.TokenKey]", "download the video MP4 file. The resulting MP4 file can", "= kwargs['name'] self.tier = None class StorageAccount(msrest.serialization.Model): \"\"\"The details about", "): super(LivePipelineCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link',", "str \"\"\" _validation = { 'node_name': {'required': True}, } _attribute_map", "will be ignored when sending a request. :ivar expiration_date: The", "or ~video_analyzer.models.ActionType \"\"\" _validation = { 'name': {'required': True}, }", "a value. :type default: str \"\"\" _validation = { 'name':", "generated for the same IoT edge module in case the", "} def __init__( self, **kwargs ): super(Properties, self).__init__(**kwargs) self.service_specification =", "log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List of metric specifications. :vartype metric_specifications:", "will be ignored when sending a request. :param tags: A", "not the video is currently being referenced be an active", "video resource used to capture and publish content. Note: if", "{'key': 'type', 'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, 'identity':", "The resource type. :type type: str \"\"\" _attribute_map = {", "source_mdm_namespace: str :ivar supported_time_grain_types: The supported time grain types. 
:vartype", "} def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id =", "_attribute_map = { 'status': {'key': 'status', 'type': 'str'}, 'description': {'key':", "self).__init__(**kwargs) self.kind = kwargs.get('kind', None) self.sku = kwargs.get('sku', None) self.description", "diagnostic log category display name. :vartype display_name: str :ivar blob_duration:", "True}, 'video_name': {'required': True}, } _attribute_map = { 'type': {'key':", "initialized and connected to the Internet prior to the token", "or Kbps, at which video should be encoded. If omitted,", "'str'}, } def __init__( self, **kwargs ): super(AudioEncoderAac, self).__init__(**kwargs) self.type", "current key used to encrypt Video Analyzer account, including the", "Tokens (JWT). All required parameters must be populated in order", "_validation = { 'type': {'required': True}, 'iot_hub_name': {'required': True}, 'device_id':", "{'readonly': True}, } _attribute_map = { 'tags': {'key': 'tags', 'type':", "of 100 Kbps. If the RTSP camera exceeds this capacity,", "class SecureIotDeviceRemoteTunnel(TunnelBase): \"\"\"A remote tunnel securely established using IoT Hub", "= kwargs.get('actions_required', None) class Properties(msrest.serialization.Model): \"\"\"Metric properties. Variables are only", "dimensions. :vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates whether regional MDM", "~video_analyzer.models.VideoEncoderBase \"\"\" _validation = { 'type': {'required': True}, } _attribute_map", "'segment_length': {'key': 'segmentLength', 'type': 'str'}, } def __init__( self, **kwargs", "token claims. All required parameters must be populated in order", "self.is_in_use = kwargs['is_in_use'] class VideoMediaInfo(msrest.serialization.Model): \"\"\"Contains information about the video", "None) class NodeInput(msrest.serialization.Model): \"\"\"Describes an input signal to be used", "for tunnel objects. 
You probably want to use the sub-classes", "self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) self.dimensions = None self.enable_regional_mdm_account = None", "= None self.expiration = None self.error = None self.parameters =", ":param reason: The reason why the given name is not", "larger volume of storage transactions. Larger segments reduce the amount", "Analyzer IoT edge module must be initialized and connected to", "must be specified in ISO8601 duration format (i.e. \"P1D\" equals", "in order to send to Azure. :ivar id: Fully qualified", "**kwargs ): super(AudioEncoderBase, self).__init__(**kwargs) self.type = None # type: Optional[str]", "of content for a unique RTSP camera. Variables are only", "'type': '[NodeInput]'}, 'preset': {'key': 'preset', 'type': 'EncoderPresetBase'}, } def __init__(", "to be ingested from cameras. * Processors: list of nodes", "= { 'type': {'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid',", "to change how the video sink publishes content via the", "Resource on which the operation is performed. :type resource: str", "URL for Video Analyzer to connect to. :type url: str", "None # type: Optional[str] class TokenKey(msrest.serialization.Model): \"\"\"Key properties for JWT", ":param identity: The Key Vault identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar", "played by the Azure Video Analyzer player widget. Alternatively, this", "super(PipelineTopology, self).__init__(**kwargs) self.kind = kwargs['kind'] self.sku = kwargs['sku'] self.description =", "'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'display_description': {'key':", "**kwargs ): super(NodeInput, self).__init__(**kwargs) self.node_name = kwargs['node_name'] class Operation(msrest.serialization.Model): \"\"\"An", "self, **kwargs ): super(AudioEncoderBase, self).__init__(**kwargs) self.type = None # type:", "properties of a SKU. 
:type sku: ~video_analyzer.models.Sku :param description: An", "sub-classes are: SecureIotDeviceRemoteTunnel. All required parameters must be populated in", "prior to the token expiration date. :type expiration_date: ~datetime.datetime \"\"\"", "for certificate sources. You probably want to use the sub-classes", "for tracking the status of an operation on the live", "Video flags contain information about the available video actions and", "by server. :type type: str :param name: Required. Node name.", "Node name. Must be unique within the topology. :type name:", "'iso-8601'}, } def __init__( self, **kwargs ): super(SystemData, self).__init__(**kwargs) self.created_by", "to be created on the service. :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param", "video resource within Azure Video Analyzer. Videos can be ingested", "'type': 'TunnelBase'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint':", "reason: str or ~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed reason why the", "vary between 30 seconds to 5 minutes, in 30 seconds", "\"\"\" _attribute_map = { 'status': {'key': 'status', 'type': 'str'}, 'description':", "reused across many pipeline instances which share the same processing", "transactions. Larger segments reduce the amount of storage transactions while", "to Azure. :param endpoint_url: The URL of the endpoint. :type", "'kind': {'required': True}, 'sku': {'required': True}, } _attribute_map = {", "video scaling information. :param height: The desired output video height.", "key. Token signature must match exactly one key. :type keys:", "sending a request. :ivar service_specification: The service specifications. :vartype service_specification:", "pipeline. 
:type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List of the topology", "{'key': 'id', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'str'}, 'end_time':", "{'key': 'deviceId', 'type': 'str'}, } def __init__( self, **kwargs ):", "kwargs.get('tags', None) self.location = kwargs['location'] class UnsecuredEndpoint(EndpointBase): \"\"\"Unsecured endpoint describes", "_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key':", "latency. Value must be specified in ISO8601 duration format (i.e.", "properties. :param retention_period: Video retention period indicates the maximum age", "self.consumption = kwargs.get('consumption', None) class NodeInput(msrest.serialization.Model): \"\"\"Describes an input signal", "self).__init__(**kwargs) self.height = kwargs.get('height', None) self.width = kwargs.get('width', None) self.mode", "be only one range specified in the sequence. All required", "'type': 'str'}, 'password': {'key': 'password', 'type': 'str'}, } def __init__(", "{'key': 'location', 'type': 'str'}, } def __init__( self, **kwargs ):", "The operation display name. :type display: ~video_analyzer.models.OperationDisplay :param origin: Origin", "super(NodeInput, self).__init__(**kwargs) self.node_name = kwargs['node_name'] class Operation(msrest.serialization.Model): \"\"\"An operation. All", "tunneled RTSP stream. It is available when the video type", "{'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'}, } def __init__( self, **kwargs ):", "'system_data': {'readonly': True}, 'edge_module_id': {'readonly': True}, } _attribute_map = {", "property, typically used when the endpoint is behind a firewall.", "aggregation type. Possible values include: \"Average\", \"Count\", \"Total\". 
:vartype aggregation_type:", "Videos ingested through live pipelines can be streamed through Azure", "**kwargs ): super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link =", "used, for example, when the topology is used only for", "the response for all Azure Resource Manager resources. Variables are", "self.username = kwargs['username'] self.password = kwargs['password'] class VideoAnalyzer(TrackedResource): \"\"\"The Video", "super(VideoScale, self).__init__(**kwargs) self.height = kwargs.get('height', None) self.width = kwargs.get('width', None)", "None) self.width = kwargs.get('width', None) self.mode = kwargs.get('mode', None) class", "ID of the storage account resource. Video Analyzer relies on", ":vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by", "on events or camera may not be accessible at the", "True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map =", "in order to send to Azure. :param name: Required. Operation", "metric aggregation type. Possible values include: \"Average\", \"Count\", \"Total\". :vartype", "emitted by service. Variables are only populated by the server,", "parameter to be used if the pipeline does not specify", "type: Optional[str] self.kid = kwargs['kid'] class EccTokenKey(TokenKey): \"\"\"Required validation properties", "str \"\"\" _validation = { 'id': {'required': True}, 'identity': {'required':", "{'key': 'properties.error', 'type': 'PipelineJobError'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, }", "a resource. :type user_assigned_identity: str \"\"\" _validation = { 'user_assigned_identity':", "\"\"\"The update operation for a Video Analyzer account. Variables are", "declaration. 
Declared parameters can and must be referenced throughout the", "{'required': True}, 'name': {'required': True}, 'endpoint': {'required': True}, } _attribute_map", "super(AccessPolicyEntity, self).__init__(**kwargs) self.role = kwargs.get('role', None) self.authentication = kwargs.get('authentication', None)", "unique within the topology. :type name: str :param video_name: Required.", "\"live\". :type retention_period: str \"\"\" _attribute_map = { 'title': {'key':", "= kwargs.get('mode', None) class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): \"\"\"A sequence of absolute datetime", ":param description: An optional description for the pipeline. :type description:", "def __init__( self, **kwargs ): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available = kwargs.get('name_available',", "self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs['value'] class TrackedResource(Resource): \"\"\"The", "The endpoints associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param", "still image from the video archive in different resolutions. They", "self.key_vault_properties = kwargs.get('key_vault_properties', None) self.identity = kwargs.get('identity', None) self.status =", "indicates the maximum age of the video archive segments which", "published. These are only allowed for topologies where \"kind\" is", "inputs: list[~video_analyzer.models.NodeInput] \"\"\" _validation = { 'type': {'required': True}, 'name':", "url: str :param tunnel: Describes the tunnel through which Video", "is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates: List", "self).__init__(**kwargs) self.client_id = None self.principal_id = None class UsernamePasswordCredentials(CredentialsBase): \"\"\"Username", "segment length. It is available when the video type is", "available. Possible values include: \"Invalid\", \"AlreadyExists\". 
:type reason: str or", "the input audio is used. :type bitrate_kbps: str \"\"\" _validation", "def __init__( self, **kwargs ): super(NetworkAccessControl, self).__init__(**kwargs) self.integration = kwargs.get('integration',", ":param inputs: Required. An array of upstream node references within", "'str'}, 'name': {'key': 'name', 'type': 'str'}, 'video_name': {'key': 'videoName', 'type':", "**kwargs ): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkResource(Resource):", "AccountEncryption(msrest.serialization.Model): \"\"\"Defines how the Video Analyzer account is (optionally) encrypted.", "to be used when they are not defined in the", "trusted authorities should be used. :type trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options:", "def __init__( self, **kwargs ): super(TunnelBase, self).__init__(**kwargs) self.type = None", "super(VideoEncoderH264, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str class VideoEntity(ProxyResource):", "in conjunction with the video content authorization token to expose", "'alg': {'key': 'alg', 'type': 'str'}, 'n': {'key': 'n', 'type': 'str'},", "list[~video_analyzer.models.NodeInput] :param video_name: Required. Name of a new or existing", "True}, } _attribute_map = { 'client_id': {'key': 'clientId', 'type': 'str'},", "def __init__( self, **kwargs ): super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = kwargs.get('value',", "True}, 'endpoint': {'required': True}, } _attribute_map = { 'type': {'key':", "kwargs.get('network_access_control', None) self.provisioning_state = None self.private_endpoint_connections = None class VideoArchival(msrest.serialization.Model):", "\"\"\"A collection of information about the state of the connection", "filled by server. :type type: str :param ranges: Required. 
The", "self.public_network_access = kwargs.get('public_network_access', None) class IotHub(msrest.serialization.Model): \"\"\"The IoT Hub details.", "described here. :type description: str :param parameters: List of the", "service. These will not take effect if the video already", "in order to prevent this value to be returned as", "are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All required parameters must be populated", "capacity in Kbps reserved for the live pipeline. The allowed", "self.name_available = kwargs.get('name_available', None) self.reason = kwargs.get('reason', None) self.message =", "None) self.next_link = kwargs.get('next_link', None) class PipelineJobError(msrest.serialization.Model): \"\"\"Details about the", "'VideoSink'} } def __init__( self, **kwargs ): super(SinkNodeBase, self).__init__(**kwargs) self.type", "type: str class RtspSource(SourceNodeBase): \"\"\"RTSP source allows for media from", "value can be updated at any time and the new", "= { 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} } def __init__( self, **kwargs", "str or ~video_analyzer.models.Kind :param sku: Required. Describes the properties of", "str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections created under", "The frame rate (in frames per second) of the encoded", "specifications. Variables are only populated by the server, and will", "- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of", "str \"\"\" _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'},", "def __init__( self, **kwargs ): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value = kwargs.get('value',", "storage_accounts: The storage accounts for this resource. 
:type storage_accounts: list[~video_analyzer.models.StorageAccount]", "Source nodes enable external data to be ingested by the", "str or ~video_analyzer.models.VideoAnalyzerEndpointType \"\"\" _validation = { 'type': {'required': True},", "{'key': 'properties.state', 'type': 'str'}, 'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'}, 'error':", "{'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ):", "\"\"\"Operation details. :param provider: The service provider. :type provider: str", "is enabled. :type archive_base_url: str :param rtsp_tunnel_url: Video low-latency streaming", "self.name = None self.display_name = None self.to_be_exported_for_shoebox = None class", "value must be greater than zero, and less than or", "'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'}, 'archival': {'key': 'properties.archival', 'type': 'VideoArchival'},", "provisioning token itself is short lived and it is only", "version (for example https://vault/keys/mykey). :type key_identifier: str :ivar current_key_identifier: The", "frame rate (in frames per second) of the encoded video.", "'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'}, } def __init__( self, **kwargs", "to send to Azure. :param name: Required. Name of the", "**kwargs ): super(EccTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str", "and the new desired retention period will be effective within", "The primary storage account must be a Standard Storage account", ":param description: Optional video description provided by the user. Value", "the operation is performed. :type resource: str :param operation: The", "access for integration group. :type integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public", "value: A collection of VideoEntity items. :type value: list[~video_analyzer.models.VideoEntity] :param", "details. 
Variables are only populated by the server, and will", "def __init__( self, **kwargs ): super(ErrorDetail, self).__init__(**kwargs) self.code = None", "'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},", "the time. :type is_in_use: bool \"\"\" _validation = { 'can_stream':", "if the video already exists. :param title: Optional title provided", "True}, 'alg': {'required': True}, 'x': {'required': True}, 'y': {'required': True},", "None) self.location = kwargs['location'] class UnsecuredEndpoint(EndpointBase): \"\"\"Unsecured endpoint describes an", "None self.encryption = kwargs.get('encryption', None) self.iot_hubs = kwargs.get('iot_hubs', None) self.public_network_access", "= '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username = kwargs['username'] self.password =", ":type error: ~video_analyzer.models.ErrorDetail \"\"\" _attribute_map = { 'error': {'key': 'error',", ":type disable_archive: str :param disable_rtsp_publishing: When set to 'true' the", "Possible values include: \"Succeeded\", \"Creating\", \"Deleting\", \"Failed\". :vartype provisioning_state: str", "True}, } _attribute_map = { 'node_name': {'key': 'nodeName', 'type': 'str'},", "a topology to be parameterized. This allows individual pipelines refer", "'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'edge_module_id':", "Set of URLs to the video content. :vartype content_urls: ~video_analyzer.models.VideoContentUrls", "class ErrorAdditionalInfo(msrest.serialization.Model): \"\"\"The resource management error additional info. Variables are", "): super(AudioEncoderAac, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str class", "contains all claims and respective values for it to be", "for a Video Analyzer account. 
Variables are only populated by", "'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData',", "_validation = { 'id': {'required': True}, 'status': {'readonly': True}, }", "**kwargs ): super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind = kwargs.get('kind', None) self.sku =", "values for parameters which have been declared in the referenced", "kwargs.get('preview_image_urls', None) class VideoCreationProperties(msrest.serialization.Model): \"\"\"Optional properties to be used in", "values include: \"Archive\", \"File\". :vartype type_properties_type: str or ~video_analyzer.models.VideoType :ivar", "value for the \"token\" query string parameter. The token is", "_attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, 'token': {'key':", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} } def", "to 'true', then \"disableArchive\" must be set to 'false'. :type", "type: str self.preset = kwargs['preset'] class EncoderSystemPreset(EncoderPresetBase): \"\"\"Describes a built-in", "'#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} } def __init__( self, **kwargs ): super(NodeBase, self).__init__(**kwargs)", "an operation on the live pipeline. Variables are only populated", "where the resource lives. :type location: str \"\"\" _validation =", "video. Newly created videos have this value set to false.", "the service provider require any updates on the consumer. :type", "be defined through the use of user-defined parameters, which allow", "'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} } def __init__( self, **kwargs", "self).__init__(**kwargs) self.type = None # type: Optional[str] class CertificateSource(msrest.serialization.Model): \"\"\"Base", "preview image URL. 
:type large: str \"\"\" _attribute_map = {", "present on the JWT token header. :type kid: str \"\"\"", "kwargs.get('height', None) self.width = kwargs.get('width', None) self.mode = kwargs.get('mode', None)", "{ 'value': {'key': 'value', 'type': '[LivePipeline]'}, 'next_link': {'key': '@nextLink', 'type':", "'@type', 'type': 'str'}, 'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'}, 'video_encoder': {'key':", "be ignored when sending a request. :param tags: A set", "topologies where \"kind\" is set to \"live\". :type segment_length: str", "'str'}, 'start_time': {'key': 'startTime', 'type': 'str'}, 'end_time': {'key': 'endTime', 'type':", "'can_stream': {'key': 'canStream', 'type': 'bool'}, 'has_data': {'key': 'hasData', 'type': 'bool'},", "'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description',", "conjunction with the video content authorization token to download the", "picks up recorded media within these ranges. :type time_sequences: ~video_analyzer.models.TimeSequenceBase", "default value can be optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition]", "{'key': 'preset', 'type': 'EncoderPresetBase'}, } def __init__( self, **kwargs ):", "pipeline will process content according to the pipeline topology definition.", "Possible values include: \"Inactive\", \"Activating\", \"Active\", \"Deactivating\". :vartype state: str", ":param is_in_use: Required. Value indicating whether or not the video", "be streamed through Azure Video Analyzer Player Widget or compatible", "for access policies authentication methods. You probably want to use", "minutes, in 30 seconds increments. Changing this value after the", "archive. Default value is 30 seconds. This property is only", "Optional description provided by the user. Value can be up", "collection of VideoAnalyzer items. 
:param value: A collection of VideoAnalyzer", "tags: dict[str, str] :param location: Required. The geo-location where the", "information. :param height: The desired output video height. :type height:", "None class JwtAuthentication(AuthenticationBase): \"\"\"Properties for access validation based on JSON", "allows individual pipelines refer to different values, such as individual", "\"\"\" _validation = { 'id': {'readonly': True}, } _attribute_map =", "validation based on JSON Web Tokens (JWT). All required parameters", "= kwargs.get('display', None) self.origin = kwargs.get('origin', None) self.properties = kwargs.get('properties',", "for media from an RTSP camera or generic RTSP server", "{'key': 'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, }", ":type password: str \"\"\" _validation = { 'type': {'required': True},", "= None self.additional_info = None class ErrorResponse(msrest.serialization.Model): \"\"\"Common error response", "'status', 'type': 'str'}, } def __init__( self, **kwargs ): super(IotHub,", "'type': 'str'}, } def __init__( self, **kwargs ): super(ParameterDeclaration, self).__init__(**kwargs)", "= { 'type': {'required': True}, 'iot_hub_name': {'required': True}, 'device_id': {'required':", "'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", "} _attribute_map = { 'tags': {'key': 'tags', 'type': '{str}'}, 'identity':", "# type: str self.audio_encoder = kwargs.get('audio_encoder', None) self.video_encoder = kwargs.get('video_encoder',", "self).__init__(**kwargs) self.retention_period = kwargs.get('retention_period', None) class VideoContentToken(msrest.serialization.Model): \"\"\"\"Video content token", "key_identifier: str :ivar current_key_identifier: The current key used to encrypt", "list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public network access is", 
"Currently, there can be only one range specified in the", "'[str]'}, } def __init__( self, **kwargs ): super(PemCertificateList, self).__init__(**kwargs) self.type", "last_modified_by_type: The type of identity that last modified the resource.", "\"live\". :type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions \"\"\" _validation = { 'type': {'required':", "# type: Optional[str] class TlsEndpoint(EndpointBase): \"\"\"TLS endpoint describes an endpoint", "type_properties_type: Video content type. Different content types are suitable for", "pipeline topology. When activated, this pipeline job will process content", "'type': {'required': True}, 'iot_hub_name': {'required': True}, 'device_id': {'required': True}, }", "{'key': 'createdAt', 'type': 'iso-8601'}, 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, 'last_modified_by_type':", "= kwargs.get('tags', None) self.location = kwargs['location'] class UnsecuredEndpoint(EndpointBase): \"\"\"Unsecured endpoint", "A collection of PipelineTopology items. :type value: list[~video_analyzer.models.PipelineTopology] :param next_link:", "of \"${PARAMETER_NAME}\" string pattern. Parameters can have optional default values", ":type required_zone_names: list[str] \"\"\" _validation = { 'id': {'readonly': True},", "define the recipe or instructions on how audio should be", "} def __init__( self, **kwargs ): super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs) self.type =", "be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] \"\"\" _validation = { 'id':", "__init__( self, **kwargs ): super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "token. The Azure Video Analyzer IoT edge module must be", "'systemData', 'type': 'SystemData'}, 'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'}, } def", "sequence of absolute datetime ranges as a string. 
The datetime", "= kwargs.get('transport', None) self.endpoint = kwargs['endpoint'] class TunnelBase(msrest.serialization.Model): \"\"\"Base class", "encrypt the account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity: The Key", "= '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates = kwargs.get('trusted_certificates', None) self.validation_options", "True}, } _attribute_map = { 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'},", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} } def __init__(", "kwargs['type'] self.key_vault_properties = kwargs.get('key_vault_properties', None) self.identity = kwargs.get('identity', None) self.status", "pipeline operation. :vartype status: str :ivar error: The error details", "value after the initial call to create the video resource", "def __init__( self, **kwargs ): super(VideoEncoderBase, self).__init__(**kwargs) self.type = None", "None self.info = None class ErrorDetail(msrest.serialization.Model): \"\"\"The error detail. Variables", ":type name: str :param inputs: Required. An array of upstream", "Video Analyzer resource. Variables are only populated by the server,", "for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param preset: Required. The", ":vartype target: str :ivar details: The error details. :vartype details:", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str self.alg = kwargs['alg']", "~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when this pipeline job", "encoding the input content using the encoder processor. All required", "token properties. 
A provisioning token allows for a single instance", "'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'},", "{ 'client_id': {'readonly': True}, 'principal_id': {'readonly': True}, } _attribute_map =", "for video and audio to be captured, optionally archived, and", "for all Azure Resource Manager resources. Variables are only populated", "ignore_signature: str \"\"\" _attribute_map = { 'ignore_hostname': {'key': 'ignoreHostname', 'type':", "by server. :type type: str :param certificates: Required. PEM formatted", "will agree on a set of authentication keys which will", "self.preview_image_urls = kwargs.get('preview_image_urls', None) class VideoCreationProperties(msrest.serialization.Model): \"\"\"Optional properties to be", "from a RTSP camera and archives the content can be", "__init__( self, **kwargs ): super(PrivateLinkResource, self).__init__(**kwargs) self.group_id = None self.required_members", "'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(VideoAnalyzerUpdate, self).__init__(**kwargs)", ":param name: The name of the resource for which availability", "this class directly. Known sub-classes are: EncoderCustomPreset, EncoderSystemPreset. All required", "str :param identity: The identities associated to the Video Analyzer", "specific pipeline topology parameter. See pipeline topology parameters for more", "class LogSpecification(msrest.serialization.Model): \"\"\"A diagnostic log emitted by service. Variables are", "Indicates whether the connection has been Approved/Rejected/Removed by the owner", "values to be used when they are not defined in", "super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity = kwargs['user_assigned_identity'] class RsaTokenKey(TokenKey): \"\"\"Required validation properties", "{'readonly': True}, 'edge_module_id': {'readonly': True}, } _attribute_map = { 'id':", "not this class directly. Known sub-classes are: VideoSink. 
All required", ":vartype status: str \"\"\" _validation = { 'type': {'required': True},", "): super(LivePipeline, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description',", "None self.source_mdm_account = None self.source_mdm_namespace = None self.supported_time_grain_types = None", "about the video and audio content. :type media_info: ~video_analyzer.models.VideoMediaInfo :param", "True}, } _attribute_map = { 'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},", ":vartype enable_regional_mdm_account: bool :ivar source_mdm_account: The source MDM account. :vartype", "info: The additional info. :vartype info: any \"\"\" _validation =", "self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class", "super(TlsEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates =", "the code is regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError", "width. :type width: str :param mode: Describes the video scaling", "'archive' and video archiving is enabled. :type archive_base_url: str :param", "exponent. :type e: str \"\"\" _validation = { 'type': {'required':", "system_data: ~video_analyzer.models.SystemData :param title: Optional video title provided by the", "_validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type':", "{'required': True}, 'iot_hub_name': {'required': True}, 'device_id': {'required': True}, } _attribute_map", ":vartype system_data: ~video_analyzer.models.SystemData \"\"\" _validation = { 'id': {'readonly': True},", "has_data: Required. Value indicating whether or not there has ever", "coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. 
All rights", "# type: str class VideoEntity(ProxyResource): \"\"\"Represents a video resource within", "and last modification of the resource. :param created_by: The identity", "{'key': 'scale', 'type': 'VideoScale'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264':", "video resource needs to be created on the service. These", "sink in a live topology allows for video and audio", "service. Possible values include: \"Pending\", \"Approved\", \"Rejected\". :type status: str", "validation to be skipped. Default is 'false'. :type ignore_signature: str", "self.required_zone_names = kwargs.get('required_zone_names', None) class PrivateLinkResourceListResult(msrest.serialization.Model): \"\"\"A list of private", "'[AccessPolicyEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self,", "bool :param reason: The reason why the given name is", "presets, which define the recipe or instructions on how audio", "'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'video_name': {'key': 'videoName', 'type': 'str'},", "Analyzer. Videos can be ingested from RTSP cameras through live", "these ranges. :type time_sequences: ~video_analyzer.models.TimeSequenceBase \"\"\" _validation = { 'type':", "from the camera. It will retry to re-establish connection (with", "'type': 'str'}, 'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'retention_period': {'key': 'retentionPeriod',", "video properties to be used in case a new video", "to be ingested into a pipeline. Currently supported only with", "under the MIT License. See License.txt in the project root", "of the claim to be present on the token. 
:type", "null list designates that Azure Video Analyzer's list of trusted", "None) self.sinks = kwargs.get('sinks', None) class PipelineTopologyCollection(msrest.serialization.Model): \"\"\"A collection of", "'SecureIotDeviceRemoteTunnel'} } def __init__( self, **kwargs ): super(TunnelBase, self).__init__(**kwargs) self.type", "accessing a resource. :type user_assigned_identity: str \"\"\" _validation = {", ":type type: str :param kid: Required. JWT token key id.", "_attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key':", "group. :type integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public network access for", "archive playback latency but generate larger volume of storage transactions.", "__init__( self, **kwargs ): super(SourceNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' #", "Private Endpoint. :vartype id: str \"\"\" _validation = { 'id':", "self).__init__(**kwargs) self.name = None self.display_name = None self.to_be_exported_for_shoebox = None", "unit: The metric unit. Possible values include: \"Bytes\", \"Count\", \"Milliseconds\".", "be stored as a file, and published via a video", "information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an existing", "{'readonly': True}, 'status': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map", "value: list[~video_analyzer.models.PipelineJob] :param next_link: A link to the next page", "{'key': 'properties.description', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources':", "for a Azure Resource Manager proxy resource. It will not", "populated in order to send to Azure. 
:param name: Required.", "= kwargs.get('last_modified_at', None) class TimeSequenceBase(msrest.serialization.Model): \"\"\"A sequence of datetime ranges", "'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, 'preset':", "Microsoft.Storage). :type id: str :param identity: A managed identity that", "to be captured, optionally archived, and published via a video", "self.alg = kwargs['alg'] self.n = kwargs['n'] self.e = kwargs['e'] class", "parameter. The token is specific to a single video. :vartype", ":ivar error: Details about the error, in case the pipeline", "str \"\"\" _validation = { 'key_identifier': {'required': True}, 'current_key_identifier': {'readonly':", "of the parameter. :type description: str :param default: The default", "name: str :param type: The resource type. :type type: str", "here can be referenced throughout the topology nodes through the", "live pipelines in your account. :type bitrate_kbps: int :ivar state:", "from an RTSP camera or generic RTSP server to be", "~video_analyzer.models.ActionType \"\"\" _validation = { 'name': {'required': True}, } _attribute_map", "self.provider = kwargs.get('provider', None) self.resource = kwargs.get('resource', None) self.operation =", "or height need be provided. Possible values include: \"Pad\", \"PreserveAspectRatio\",", "= kwargs.get('error', None) class VideoAnalyzerUpdate(msrest.serialization.Model): \"\"\"The update operation for a", "or ~video_analyzer.models.VideoType :ivar flags: Video flags contain information about the", ":ivar code: The error code. :vartype code: str :ivar message:", "account (either Microsoft.ClassicStorage or Microsoft.Storage). :type id: str :param identity:", "used for real-time ingestion, archiving and publishing of content for", "an existing pipeline topology defined for real-time content processing. 
When", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'audio_encoder': {'key':", "'keys': {'key': 'keys', 'type': '[TokenKey]'}, } def __init__( self, **kwargs", "include: \"Http\", \"Tcp\". :type transport: str or ~video_analyzer.models.RtspTransport :param endpoint:", "be ignored when sending a request. :ivar name: The name", "containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param private_endpoint:", "'str'}, } def __init__( self, **kwargs ): super(VideoCreationProperties, self).__init__(**kwargs) self.title", "} _attribute_map = { 'endpoint_url': {'key': 'endpointUrl', 'type': 'str'}, 'type':", "'#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str class AuthenticationBase(msrest.serialization.Model): \"\"\"Base class for access", "to Azure. :param type: Required. The type of key used", "IoT edge module in case the module state lost or", "None) class VideoSource(SourceNodeBase): \"\"\"Video source allows for content from a", "None self.additional_info = None class ErrorResponse(msrest.serialization.Model): \"\"\"Common error response for", ":type value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A link to the next", "SourceNodeBase(NodeBase): \"\"\"Base class for topology source nodes. You probably want", "self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) self.status =", "value: A collection of LivePipeline items. 
:type value: list[~video_analyzer.models.LivePipeline] :param", "'properties.requiredZoneNames', 'type': '[str]'}, } def __init__( self, **kwargs ): super(PrivateLinkResource,", "type: Optional[str] class TokenKey(msrest.serialization.Model): \"\"\"Key properties for JWT token validation.", "): super(NodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name", "'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(LivePipelineOperationStatus, self).__init__(**kwargs)", ":type kid: str :param alg: Required. Elliptical curve algorithm to", "'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'edge_module_id': {'key': 'properties.edgeModuleId',", "**kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = kwargs.get('error', None) class GroupLevelAccessControl(msrest.serialization.Model):", "} def __init__( self, **kwargs ): super(VideoSource, self).__init__(**kwargs) self.type =", "'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs',", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} } def", "self, **kwargs ): super(EccTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type:", "on how the input video should be processed. You probably", "**kwargs ): super(TunnelBase, self).__init__(**kwargs) self.type = None # type: Optional[str]", "data-plane. :type is_data_action: bool :param action_type: Indicates the action type.", "tunnel objects. You probably want to use the sub-classes and", "class for endpoints. You probably want to use the sub-classes", "def __init__( self, **kwargs ): super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access = kwargs.get('public_network_access',", "send to Azure. :param user_assigned_identity: Required. The user assigned managed", "used only for archiving content. Default is 'false'. 
If set", "media within these ranges. :type time_sequences: ~video_analyzer.models.TimeSequenceBase \"\"\" _validation =", "'@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, } _subtype_map", "= kwargs.get('status', None) self.description = kwargs.get('description', None) self.actions_required = kwargs.get('actions_required',", "unique instance of a batch topology, used for offline processing", ":type type: str :param username: Required. Username to be presented", "def __init__( self, **kwargs ): super(VideoEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value',", "= { 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials',", "Azure. :param name: Required. The SKU name. Possible values include:", "uses the resolution of the input video. :type scale: ~video_analyzer.models.VideoScale", "class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): \"\"\"Status of private endpoint connection operation. All required", "'ignoreSignature', 'type': 'str'}, } def __init__( self, **kwargs ): super(TlsValidationOptions,", "_validation = { 'expiration_date': {'readonly': True}, 'token': {'readonly': True}, }", "in the pipeline topology. :type name: str :param value: Parameter", "blob. :vartype blob_duration: str \"\"\" _validation = { 'name': {'readonly':", "Optional video description provided by the user. Value can be", "{'readonly': True}, 'flags': {'readonly': True}, 'content_urls': {'readonly': True}, } _attribute_map", "list[~video_analyzer.models.SinkNodeBase] \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':", "True}, 'group_id': {'readonly': True}, 'required_members': {'readonly': True}, } _attribute_map =", "__init__( self, **kwargs ): super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date = None self.token", "class IotHub(msrest.serialization.Model): \"\"\"The IoT Hub details. 
Variables are only populated", "Possible values include: \"Http\", \"Tcp\". :type transport: str or ~video_analyzer.models.RtspTransport", "'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'group_id': {'key': 'properties.groupId', 'type': 'str'},", "to send to Azure. :ivar id: Fully qualified resource ID", "created the resource. :type created_by: str :param created_by_type: The type", "Video Analyzer resource. All required parameters must be populated in", "property is only allowed for topologies where \"kind\" is set", "be an active pipeline. The fact that is being referenced,", "module in case the module state lost or reset. Variables", "are returned in the response for all Azure Resource Manager", "be reused across many pipeline instances which share the same", "case the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters:", "authorization token to expose a WebSocket tunneled RTSP stream. It", "IS08601, and the sum of the ranges should add up", "X coordinate. :type x: str :param y: Required. Y coordinate.", "identity: The Key Vault identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status:", "def __init__( self, **kwargs ): super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type = kwargs['type']", "SecureIotDeviceRemoteTunnel(TunnelBase): \"\"\"A remote tunnel securely established using IoT Hub device", "self, **kwargs ): super(VideoScale, self).__init__(**kwargs) self.height = kwargs.get('height', None) self.width", "Provisioning state of the Video Analyzer account. Possible values include:", "Azure Resource Manager resources. 
Variables are only populated by the", "is only allowed for topologies where \"kind\" is set to", "principal_id: str \"\"\" _validation = { 'client_id': {'readonly': True}, 'principal_id':", "'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'display_description': {'key': 'displayDescription',", "kwargs.get('parameters', None) class LivePipelineCollection(msrest.serialization.Model): \"\"\"A collection of LivePipeline items. :param", "input video should be processed. You probably want to use", "self, **kwargs ): super(EndpointBase, self).__init__(**kwargs) self.type = None # type:", "\"\"\"The user assigned managed identity to use when accessing a", "of the resource. :param created_by: The identity that created the", "failed operations. (This also follows the OData error response format.).", "= kwargs.get('key_vault_properties', None) self.identity = kwargs.get('identity', None) self.status = None", "an operation on the pipeline job. Variables are only populated", "how the input video should be processed. You probably want", "in each blob. :vartype blob_duration: str \"\"\" _validation = {", "the endpoint. :type endpoint_url: str :param type: Required. The type", "'systemData', 'type': 'SystemData'}, 'group_id': {'key': 'properties.groupId', 'type': 'str'}, 'required_members': {'key':", "{'key': 'properties.archival', 'type': 'VideoArchival'}, } def __init__( self, **kwargs ):", ":ivar target: The error target. :vartype target: str :ivar details:", "'origin': {'key': 'origin', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'Properties'},", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} } def", "name. 
:type name: str :param display: The operation display name.", "None) class VideoCreationProperties(msrest.serialization.Model): \"\"\"Optional properties to be used in case", "'[EdgeModuleEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self,", "Required. Elliptical curve algorithm to be used: ES256, ES384 or", "Web Tokens (JWT). All required parameters must be populated in", "dict[str, str] :param location: Required. The geo-location where the resource", "{'key': 'properties.expiration', 'type': 'iso-8601'}, 'error': {'key': 'properties.error', 'type': 'PipelineJobError'}, 'parameters':", "None self.target = None self.details = None self.additional_info = None", "= None class MetricDimension(msrest.serialization.Model): \"\"\"A metric dimension. Variables are only", "The IoT Hub resource identifier. :type id: str :param identity:", "send to Azure. :param node_name: Required. The name of the", "is 'false'. :type ignore_signature: str \"\"\" _attribute_map = { 'ignore_hostname':", "'str'}, 'blob_duration': {'key': 'blobDuration', 'type': 'str'}, } def __init__( self,", "system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to an existing pipeline", "'type': 'str'}, } def __init__( self, **kwargs ): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs)", "sub-classes and not this class directly. Known sub-classes are: EncoderCustomPreset,", "'name_available': {'key': 'nameAvailable', 'type': 'bool'}, 'reason': {'key': 'reason', 'type': 'str'},", "'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key':", "{'key': 'ingestion', 'type': 'GroupLevelAccessControl'}, 'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'}, }", ":ivar aggregation_type: The metric aggregation type. 
Possible values include: \"Average\",", "resource name is available. :type name_available: bool :param reason: The", "**kwargs ): super(VideoArchival, self).__init__(**kwargs) self.retention_period = kwargs.get('retention_period', None) class VideoContentToken(msrest.serialization.Model):", "A managed identity that Video Analyzer will use to access", "__init__( self, **kwargs ): super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier = kwargs['key_identifier'] self.current_key_identifier", "'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'},", "lost or reset. Variables are only populated by the server,", "whether regional MDM account is enabled. :vartype enable_regional_mdm_account: bool :ivar", "the pipeline. :type description: str :ivar state: Current state of", "in different resolutions. They are available when the video type", "'name', 'type': 'str'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'time_sequences': {'key':", "30 seconds increments. Changing this value after the initial call", "'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } def __init__( self,", "'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__(", "def __init__( self, **kwargs ): super(VideoArchival, self).__init__(**kwargs) self.retention_period = kwargs.get('retention_period',", "by server. :type type: str :param bitrate_kbps: Bitrate, in kilobits", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A set of", "~datetime.datetime :ivar token: The token blob to be provided to", "values include: \"Internal\". :type action_type: str or ~video_analyzer.models.ActionType \"\"\" _validation", "): super(PrivateLinkResource, self).__init__(**kwargs) self.group_id = None self.required_members = None self.required_zone_names", "in 1 day increments. 
When absent (null), all video content", "Kbps, at which audio should be encoded (2-channel stereo audio", "__init__( self, **kwargs ): super(SinkNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' #", "IoT Hub details. Variables are only populated by the server,", "used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] \"\"\"", "account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity: The Key Vault identity.", "= kwargs.get('sinks', None) class PrivateEndpoint(msrest.serialization.Model): \"\"\"The Private Endpoint resource. Variables", "the resource. :param created_by: The identity that created the resource.", "'message': {'key': 'message', 'type': 'str'}, 'target': {'key': 'target', 'type': 'str'},", "type: str :param name: Required. Name of the built-in encoding", "a SKU. :type sku: ~video_analyzer.models.Sku :param description: An optional description", "players by appending the following to the base URL: ..", "\"\"\" _validation = { 'can_stream': {'required': True}, 'has_data': {'required': True},", "class VideoSink(SinkNodeBase): \"\"\"Video sink in a live topology allows for", "'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityRequest,", "in order to send to Azure. :param user_assigned_identity: Required. The", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. 
:vartype id: str :ivar name: The name of the", "It is recommended that the expected use of the topology", "'type': '[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__(", "datetime values should follow IS08601, and the sum of the", "\"\"\"A custom preset for encoding video with the H.264 (AVC)", "def __init__( self, **kwargs ): super(TimeSequenceBase, self).__init__(**kwargs) self.type = None", "'status': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type',", "None self.required_zone_names = kwargs.get('required_zone_names', None) class PrivateLinkResourceListResult(msrest.serialization.Model): \"\"\"A list of", "None) self.operation = kwargs.get('operation', None) self.description = kwargs.get('description', None) class", "generate registration token for the Azure Video Analyzer IoT edge", "class directly. Known sub-classes are: UsernamePasswordCredentials. All required parameters must", "RSA public key exponent. :type e: str \"\"\" _validation =", "Video file download URL. This URL can be used in", "= { 'value': {'key': 'value', 'type': '[PipelineJob]'}, 'next_link': {'key': '@nextLink',", "generic RTSP servers. :type endpoint: ~video_analyzer.models.EndpointBase \"\"\" _validation = {", "= kwargs.get('consumption', None) class NodeInput(msrest.serialization.Model): \"\"\"Describes an input signal to", "'str'}, } def __init__( self, **kwargs ): super(TrackedResource, self).__init__(**kwargs) self.tags", "self.processors = kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class PipelineTopologyCollection(msrest.serialization.Model):", "self, **kwargs ): super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link", "None) self.sources = kwargs.get('sources', None) self.processors = kwargs.get('processors', None) self.sinks", "to. 
This contains the required information for Video Analyzer to", "name_available: bool :param reason: The reason why the given name", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param private_endpoint: The", ":param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at", "the instance of the Video Analyzer edge module. :vartype edge_module_id:", "when establishing the remote tunnel. This string is case-sensitive. :type", "the IoT edge module will agree on a set of", ":type tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates: List of trusted certificate authorities", "'type': {'readonly': True}, 'system_data': {'readonly': True}, 'group_id': {'readonly': True}, 'required_members':", "{ 'value': {'key': 'value', 'type': '[VideoAnalyzer]'}, } def __init__( self,", "{'readonly': True}, 'target': {'readonly': True}, 'details': {'readonly': True}, 'additional_info': {'readonly':", "details: The error details. :vartype details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The", ":type start_time: str :param end_time: Operation end time. :type end_time:", "class JwtAuthentication(AuthenticationBase): \"\"\"Properties for access validation based on JSON Web", "Private Endpoint resource. Variables are only populated by the server,", "match exactly one key. :type keys: list[~video_analyzer.models.TokenKey] \"\"\" _validation =", "kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class LivePipelineOperationStatus(msrest.serialization.Model): \"\"\"Used for", "by the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List of", "The operation type. :type operation: str :param description: The operation", "_validation = { 'type': {'readonly': True}, 'info': {'readonly': True}, }", "PEM formatted certificates. 
All required parameters must be populated in", "None) self.mode = kwargs.get('mode', None) class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): \"\"\"A sequence of", "'type': 'str'}, 'frame_rate': {'key': 'frameRate', 'type': 'str'}, 'scale': {'key': 'scale',", "def __init__( self, **kwargs ): super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date = None", "a video resource within Azure Video Analyzer. Videos can be", "to be achieved and can be reused across many pipeline", "flags used to change how video is published. These are", "include: \"Failed\", \"InProgress\", \"Succeeded\". :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str self.ranges = kwargs['ranges']", "\"ES512\". :type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo :param x: Required. X", "the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase \"\"\"", "None) self.properties = kwargs.get('properties', None) self.is_data_action = kwargs.get('is_data_action', None) self.action_type", "kwargs['name'] self.tier = None class StorageAccount(msrest.serialization.Model): \"\"\"The details about the", "must be greater than zero, and less than or equal", "the same processing characteristics. For instance, a pipeline topology which", "used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param", "ranges. Example: '[[\"2021-10-05T03:30:00Z\", \"2021-10-05T03:40:00Z\"]]'. :type ranges: str \"\"\" _validation =", "other destinations. Variables are only populated by the server, and", "self.dimensions = None self.enable_regional_mdm_account = None self.source_mdm_account = None self.source_mdm_namespace", "or equal to 300. 
If omitted, the encoder uses the", "None self.required_members = None self.required_zone_names = kwargs.get('required_zone_names', None) class PrivateLinkResourceListResult(msrest.serialization.Model):", "{'key': 'reason', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, }", "claims and respective values for it to be valid. :type", "device_id: str \"\"\" _validation = { 'type': {'required': True}, 'iot_hub_name':", "can be downloaded as MP4 files. Variables are only populated", "of the Key Vault mapping. :vartype status: str \"\"\" _validation", "an specific pipeline topology parameter. See pipeline topology parameters for", "or ~video_analyzer.models.AccessPolicyEccAlgo :param x: Required. X coordinate. :type x: str", "\"Key\". :type created_by_type: str or ~video_analyzer.models.CreatedByType :param created_at: The timestamp", "__init__( self, **kwargs ): super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications = None self.metric_specifications", "the Azure Video Analyzer IoT edge module. All required parameters", "the pipelines. All required parameters must be populated in order", "scenarios. Possible values include: \"Archive\", \"File\". :vartype type_properties_type: str or", "per entry. :type certificates: list[str] \"\"\" _validation = { 'type':", ":ivar type_properties_type: Video content type. Different content types are suitable", "regional MDM account is enabled. :vartype enable_regional_mdm_account: bool :ivar source_mdm_account:", "parameters for more information. 
All required parameters must be populated", "} def __init__( self, **kwargs ): super(PipelineJob, self).__init__(**kwargs) self.topology_name =", "self.tags = kwargs.get('tags', None) self.location = kwargs['location'] class UnsecuredEndpoint(EndpointBase): \"\"\"Unsecured", "'type': {'required': True}, 'name': {'required': True}, } _attribute_map = {", "set of authentication keys which will be auto-rotated as long", "the RTP packets are interleaved on the TCP RTSP connection.", "self.error = None self.parameters = kwargs.get('parameters', None) class PipelineTopology(ProxyResource): \"\"\"Pipeline", "'SystemData'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type':", "optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] \"\"\" _validation = {", "{ 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True},", "'str'}, } def __init__( self, **kwargs ): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status", "self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str self.audio_encoder = kwargs.get('audio_encoder', None)", "{'key': 'name', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, }", "'@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoEntityCollection,", "self.identity = kwargs.get('identity', None) self.status = None class SystemData(msrest.serialization.Model): \"\"\"Metadata", "= kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state = None", "'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'}, 'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'},", "pipeline. 
The allowed range is from 500 to 3000 Kbps", "in kilobits per second or Kbps, at which audio should", "'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} } def __init__( self, **kwargs ): super(AudioEncoderBase,", "used if the pipeline does not specify a value. :type", "user_assigned_identity: str \"\"\" _validation = { 'user_assigned_identity': {'required': True}, }", "directly. Known sub-classes are: TlsEndpoint, UnsecuredEndpoint. All required parameters must", "def __init__( self, **kwargs ): super(UnsecuredEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint'", ":vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'id': {'readonly': True},", "{'readonly': True}, 'system_data': {'readonly': True}, 'edge_module_id': {'readonly': True}, } _attribute_map", "pipeline operation. :vartype name: str :ivar status: The status of", "'scale', 'type': 'VideoScale'}, } def __init__( self, **kwargs ): super(VideoEncoderH264,", "'type': {'required': True}, 'certificates': {'required': True}, } _attribute_map = {", "kwargs.get('audiences', None) self.claims = kwargs.get('claims', None) self.keys = kwargs.get('keys', None)", "exponential backoff), checking to see if the camera bitrate is", "'audiences': {'key': 'audiences', 'type': '[str]'}, 'claims': {'key': 'claims', 'type': '[TokenClaim]'},", "self, **kwargs ): super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind = kwargs.get('kind', None) self.sku", "endpoint describes an endpoint that the pipeline can connect to", "on how the input content should be processed. You probably", "is 'PreserveAspectRatio' then only one of width or height need", "will be ignored when sending a request. :ivar log_specifications: List", "metric specifications. 
:vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] \"\"\" _validation = { 'log_specifications':", "media_info: Contains information about the video and audio content. :type", "= { 'name': {'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier',", "{ 'log_specifications': {'readonly': True}, 'metric_specifications': {'readonly': True}, } _attribute_map =", "{'readonly': True}, } _attribute_map = { 'type': {'key': 'type', 'type':", "96, 112, 128, 160, 192, 224, and 256. If omitted,", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'credentials':", "'type': '[str]'}, 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount',", "is 'archive' and preview images are enabled. :param small: Low", "'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'},", "str :ivar error: The error details for the live pipeline", "= { 'key_identifier': {'required': True}, 'current_key_identifier': {'readonly': True}, } _attribute_map", "more information. All required parameters must be populated in order", "order to send to Azure. :param type: Required. The identity", "sending a request. :ivar expiration_date: The expiration date of the", "long. :type description: str :param segment_length: Segment length indicates the", "self.id = kwargs['id'] self.identity = kwargs.get('identity', None) self.status = None", "'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'location':", "{'required': True}, 'x': {'required': True}, 'y': {'required': True}, } _attribute_map", "not this class directly. Known sub-classes are: EccTokenKey, RsaTokenKey. 
All", "self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str class RtspSource(SourceNodeBase): \"\"\"RTSP source", "\"\"\" _validation = { 'client_id': {'readonly': True}, 'principal_id': {'readonly': True},", "example https://vault/keys/mykey/version1) or reference a key without a version (for", "'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},", "**kwargs ): super(EncoderProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str", "consumer. :type actions_required: str \"\"\" _attribute_map = { 'status': {'key':", "key exponent. :type e: str \"\"\" _validation = { 'type':", "{ 'type': {'key': '@type', 'type': 'str'}, 'username': {'key': 'username', 'type':", "send to Azure. :param name: Required. Operation identifier. :type name:", "{'key': 'frameRate', 'type': 'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, }", "a request. :ivar log_specifications: List of log specifications. :vartype log_specifications:", "} def __init__( self, **kwargs ): super(PipelineTopologyCollection, self).__init__(**kwargs) self.value =", "self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str self.issuers = kwargs.get('issuers', None)", "def __init__( self, **kwargs ): super(TlsEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint'", "populated in order to send to Azure. 
:param id: Required.", "= kwargs.get('preview_image_urls', None) class VideoCreationProperties(msrest.serialization.Model): \"\"\"Optional properties to be used", "based on the key id present on the JWT token", "class AccountEncryption(msrest.serialization.Model): \"\"\"Defines how the Video Analyzer account is (optionally)", "'sku': {'required': True}, } _attribute_map = { 'id': {'key': 'id',", "'str'}, 'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type':", "module state lost or reset. Variables are only populated by", "a file, and published via a video resource of type", "class EdgeModuleEntityCollection(msrest.serialization.Model): \"\"\"A collection of EdgeModuleEntity items. :param value: A", "or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types: list[str]", "{'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ):", "self.frame_rate = kwargs.get('frame_rate', None) self.scale = kwargs.get('scale', None) class VideoEncoderH264(VideoEncoderBase):", "encoding audio. :type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes a custom", "} def __init__( self, **kwargs ): super(TimeSequenceBase, self).__init__(**kwargs) self.type =", "{'readonly': True}, 'state': {'readonly': True}, } _attribute_map = { 'id':", "List of the topology sink nodes. Sink nodes allow pipeline", "low-latency feed is available from the source. :type rtsp_tunnel_url: str", "collection of EdgeModuleEntity items. :type value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A", "list[~video_analyzer.models.LivePipeline] :param next_link: A link to the next page of", "be used: ES256, ES384 or ES512. Possible values include: \"ES256\",", "not this class directly. Known sub-classes are: AudioEncoderAac. 
All required", "'type': 'str'}, 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'}, } def __init__(", "~video_analyzer.models.GroupLevelAccessControl \"\"\" _attribute_map = { 'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'},", "'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, } def __init__(", "= kwargs.get('issuers', None) self.audiences = kwargs.get('audiences', None) self.claims = kwargs.get('claims',", "'system_data': {'readonly': True}, 'state': {'readonly': True}, 'expiration': {'readonly': True}, 'error':", "will be ignored when sending a request. :ivar id: Fully", "The content token expiration date in ISO8601 format (eg. 2021-01-01T00:00:00Z).", "'videoName', 'type': 'str'}, 'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'}, } def", "directly. Known sub-classes are: VideoSink. All required parameters must be", "validating client API access. :type authentication: ~video_analyzer.models.AuthenticationBase \"\"\" _validation =", "which allow for data to be stored or exported to", "**kwargs ): super(VideoSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str", "derived types.Constant filled by server. :type type: str :param name:", "= kwargs.get('operation', None) self.description = kwargs.get('description', None) class ParameterDeclaration(msrest.serialization.Model): \"\"\"Single", "kid: Required. JWT token key id. Validation keys are looked", "'id': {'key': 'id', 'type': 'str'}, } def __init__( self, **kwargs", "'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'}, 'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'},", "kwargs['iot_hub_name'] self.device_id = kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model): \"\"\"The service metric specifications.", "class AudioEncoderBase(msrest.serialization.Model): \"\"\"Base type for all audio encoder presets, which", "connection. 
:type description: str :param actions_required: A message indicating if", "default: The default value for the parameter to be used", "AAC codec. All required parameters must be populated in order", "dynamic properties based on the current video state. :vartype flags:", "'required_members': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", "sequence of datetime ranges as a string. You probably want", "TLS endpoints. :param ignore_hostname: When set to 'true' causes the", "the MIT License. See License.txt in the project root for", "whether or not the video can be streamed. Only \"archive\"", "URL. :type medium: str :param large: High resolution preview image", "class PipelineJobOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status of an operation", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[LivePipeline]'}, 'next_link':", "30 seconds to 5 minutes, in 30 seconds increments. :type", "{'required': True}, 'y': {'required': True}, } _attribute_map = { 'type':", "__init__( self, **kwargs ): super(ProcessorNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' #", "to \"live\". :param disable_archive: When set to 'true' content will", "collection of AccessPolicyEntity items. :param value: A collection of AccessPolicyEntity", "be accessible at the time. :type is_in_use: bool \"\"\" _validation", "a unique instance of a batch topology, used for offline", "'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'type_properties_type': {'key': 'properties.type', 'type':", "= '#Microsoft.VideoAnalyzer.RtspSource' # type: str self.transport = kwargs.get('transport', None) self.endpoint", "storage account must be a Standard Storage account (either Microsoft.ClassicStorage", "the cameras. Individual instance properties can be defined through the", "TCP RTSP connection. When using HTTP, the RTSP messages are", "to the cloud account. 
The provisioning token itself is short", "to. :type url: str :param tunnel: Describes the tunnel through", "'lastModifiedByType', 'type': 'str'}, 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, } def", "= { 'segment_length': {'key': 'segmentLength', 'type': 'str'}, } def __init__(", "and can be reused across many pipeline instances which share", "display: ~video_analyzer.models.OperationDisplay :param origin: Origin of the operation. :type origin:", "\"\"\"Required validation properties for tokens generated with RSA algorithm. All", "only picks up recorded media within these ranges. :type time_sequences:", "_validation = { 'expiration_date': {'required': True}, } _attribute_map = {", "matches at least one of the given values. :type audiences:", "self, **kwargs ): super(PipelineJobCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link", "self, **kwargs ): super(VideoPublishingOptions, self).__init__(**kwargs) self.disable_archive = kwargs.get('disable_archive', None) self.disable_rtsp_publishing", "): super(VideoAnalyzer, self).__init__(**kwargs) self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts',", "= { 'log_specifications': {'readonly': True}, 'metric_specifications': {'readonly': True}, } _attribute_map", "used by the Video Analyzer resource. Variables are only populated", "'str'}, } def __init__( self, **kwargs ): super(LogSpecification, self).__init__(**kwargs) self.name", "of the key used to encrypt the account. :type key_vault_properties:", "to_be_exported_for_shoebox: bool \"\"\" _validation = { 'name': {'readonly': True}, 'display_name':", "NetworkAccessControl(msrest.serialization.Model): \"\"\"Network access control for video analyzer account. :param integration:", "**kwargs ): super(PipelineJobError, self).__init__(**kwargs) self.code = kwargs.get('code', None) self.message =", "be published, disabling low latency streaming. 
This is used, for", "SinkNodeBase, SourceNodeBase. All required parameters must be populated in order", "): super(EncoderCustomPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str self.audio_encoder", "in the granularity of days, up to a maximum of", "# type: Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model): \"\"\"The check availability request body.", "_validation = { 'code': {'readonly': True}, 'message': {'readonly': True}, 'target':", "messages. Possible values include: \"Http\", \"Tcp\". :type transport: str or", "'str'}, 'info': {'key': 'info', 'type': 'object'}, } def __init__( self,", "= kwargs.get('width', None) self.mode = kwargs.get('mode', None) class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): \"\"\"A", "\"token\" query string parameter. The token is specific to a", ":type location: str :param identity: The identities associated to the", "which the operation is performed. :type resource: str :param operation:", "} def __init__( self, **kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type =", "mode is 'Pad' or 'Stretch' then both width and height", "display name. :type display: ~video_analyzer.models.OperationDisplay :param origin: Origin of the", ":type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType \"\"\" _validation = { 'type':", "when sending a request. :ivar code: The error code. :vartype", "values. :type issuers: list[str] :param audiences: List of expected token", "type: Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model): \"\"\"The check availability request body. :param", "details. :param provider: The service provider. :type provider: str :param", "\"\"\"Defines how the Video Analyzer account is (optionally) encrypted. 
Variables", "def __init__( self, **kwargs ): super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date = kwargs['expiration_date']", "__init__( self, **kwargs ): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' #", "} def __init__( self, **kwargs ): super(UnsecuredEndpoint, self).__init__(**kwargs) self.type =", "of a batch topology, used for offline processing of selected", "} _attribute_map = { 'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'}, 'metric_specifications':", "via the video resource. This property is only allowed for", "self).__init__(**kwargs) self.download_url = kwargs.get('download_url', None) self.archive_base_url = kwargs.get('archive_base_url', None) self.rtsp_tunnel_url", "dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] \"\"\" _validation = { 'type': {'required': True}, }", "Name of the parameter. :type name: str :param type: Required.", "bool :ivar source_mdm_account: The source MDM account. :vartype source_mdm_account: str", "self.description = kwargs.get('description', None) self.segment_length = kwargs.get('segment_length', None) self.retention_period =", "and height must be specified. Else if the mode is", "sources: List of the topology source nodes. Source nodes enable", "These URLs can be used in conjunction with the video", "module will agree on a set of authentication keys which", "'@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'}", "__init__( self, **kwargs ): super(MetricSpecification, self).__init__(**kwargs) self.name = None self.display_name", "ignored when sending a request. :ivar client_id: The client ID.", "'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__(", "AccessPolicyEntity items. :param value: A collection of AccessPolicyEntity items. :type", "include: \"ES256\", \"ES384\", \"ES512\". 
:type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo :param", "None class NetworkAccessControl(msrest.serialization.Model): \"\"\"Network access control for video analyzer account.", "Resource(msrest.serialization.Model): \"\"\"Common fields that are returned in the response for", "video streaming. Default is 'false'. If set to 'true', then", "'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'}, 'encryption': {'key':", "{'key': 'endTime', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error':", "items. :type value: list[~video_analyzer.models.LivePipeline] :param next_link: A link to the", "kwargs.get('video_creation_properties', None) self.video_publishing_options = kwargs.get('video_publishing_options', None) class VideoSource(SourceNodeBase): \"\"\"Video source", "): super(PrivateEndpoint, self).__init__(**kwargs) self.id = None class PrivateEndpointConnection(Resource): \"\"\"The Private", "operation description. :type description: str \"\"\" _attribute_map = { 'provider':", "User Assigned Managed Identities. :type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] \"\"\" _validation", "): super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class VideoAnalyzerIdentity(msrest.serialization.Model): \"\"\"The", "type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.frame_rate = kwargs.get('frame_rate', None)", "values include: \"Average\", \"Count\", \"Total\". :vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType", "= { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message',", "{'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'role':", "None) class EdgeModuleProvisioningToken(msrest.serialization.Model): \"\"\"Provisioning token properties. 
A provisioning token allows", "'n': {'key': 'n', 'type': 'str'}, 'e': {'key': 'e', 'type': 'str'},", "there has ever been data recorded or uploaded into the", "batch topology, used for offline processing of selected portions of", "__init__( self, **kwargs ): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname', None)", "same processing is to be applied across all the cameras.", "equals 30 seconds) and can vary between 30 seconds to", "per second) of the encoded video. The value must be", "accessible at the time. :type is_in_use: bool \"\"\" _validation =", "operation. :vartype name: str :ivar status: The status of the", "custom preset for encoding audio with the AAC codec. All", "self.source_mdm_namespace = None self.supported_time_grain_types = None class NetworkAccessControl(msrest.serialization.Model): \"\"\"Network access", "True}, 'supported_time_grain_types': {'readonly': True}, } _attribute_map = { 'name': {'key':", "for it to be valid. :type claims: list[~video_analyzer.models.TokenClaim] :param keys:", "= kwargs.get('ignore_hostname', None) self.ignore_signature = kwargs.get('ignore_signature', None) class TokenClaim(msrest.serialization.Model): \"\"\"Properties", "resource which has 'tags' and a 'location'. Variables are only", "the properties of a SKU. :type sku: ~video_analyzer.models.Sku :param description:", "__init__( self, **kwargs ): super(VideoAnalyzer, self).__init__(**kwargs) self.identity = kwargs.get('identity', None)", "'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'provisioning_state':", "to encrypt Video Analyzer account, including the key version. :vartype", "str :ivar details: The error details. 
:vartype details: list[~video_analyzer.models.ErrorDetail] :ivar", "= { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map =", "'type': 'str'}, 'details': {'key': 'details', 'type': '[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo',", "type: str :param bitrate_kbps: The maximum bitrate, in kilobits per", "as MP4 files. Variables are only populated by the server,", "_attribute_map = { 'value': {'key': 'value', 'type': '[PipelineJob]'}, 'next_link': {'key':", "storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with this resource.", "edge module. Variables are only populated by the server, and", "'false'. :type disable_rtsp_publishing: str \"\"\" _attribute_map = { 'disable_archive': {'key':", "WebSocket tunneled RTSP stream. It is available when the video", "str :ivar principal_id: The principal ID. :vartype principal_id: str \"\"\"", "self.required_members = None self.required_zone_names = kwargs.get('required_zone_names', None) class PrivateLinkResourceListResult(msrest.serialization.Model): \"\"\"A", "} def __init__( self, **kwargs ): super(TokenClaim, self).__init__(**kwargs) self.name =", "stereo audio at a sampling rate of 48 kHz). Allowed", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[PipelineTopology]'}, 'next_link':", "time_sequences: Required. Describes a sequence of datetime ranges. The video", "self.default = kwargs.get('default', None) class ParameterDefinition(msrest.serialization.Model): \"\"\"Defines the parameter value", "{'key': 'keys', 'type': '[TokenKey]'}, } def __init__( self, **kwargs ):", "__init__( self, **kwargs ): super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name = None self.status", ":type ranges: str \"\"\" _validation = { 'type': {'required': True},", "kid: str :param alg: Required. 
Elliptical curve algorithm to be", ":vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] \"\"\" _validation = { 'code': {'readonly': True},", "Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype", "reserved capacity. Doing so will ensure that one 'noisy neighbor'", "self.description = kwargs.get('description', None) self.actions_required = kwargs.get('actions_required', None) class Properties(msrest.serialization.Model):", "VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): \"\"\"A sequence of absolute datetime ranges as a string.", "'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'type_properties_type':", "to the base URL: .. code-block:: - HLSv4: /manifest(format=m3u8-aapl).m3u8 -", ":ivar expiration_date: The expiration date of the registration token. The", "and preview images are enabled. :param small: Low resolution preview", "sequence of datetime ranges. The video source only picks up", "**kwargs ): super(VideoEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link =", "live pipelines or can be created by exporting sequences from", "'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } def", "ignored when sending a request. :ivar expiration_date: The content token", "or Kbps, at which audio should be encoded (2-channel stereo", "with batch pipelines. All required parameters must be populated in", "'str'}, } def __init__( self, **kwargs ): super(VideoScale, self).__init__(**kwargs) self.height", "str :param alg: Required. RSA algorithm to be used: RS256,", ":ivar client_id: The client ID. :vartype client_id: str :ivar principal_id:", "pipeline operation. 
:vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name':", "self, **kwargs ): super(TrackedResource, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.location", "'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} } def __init__( self, **kwargs ): super(TimeSequenceBase,", "'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'expiration': {'key': 'properties.expiration', 'type':", "ingested into a pipeline. All required parameters must be populated", "and scenarios. Possible values include: \"Archive\", \"File\". :vartype type_properties_type: str", "'kind', 'type': 'str'}, 'sku': {'key': 'sku', 'type': 'Sku'}, 'description': {'key':", "kwargs['endpoint'] class TunnelBase(msrest.serialization.Model): \"\"\"Base class for tunnel objects. You probably", "unique within the topology. :type name: str \"\"\" _validation =", "): super(IotHub, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs['identity'] self.status", "VideoScale(msrest.serialization.Model): \"\"\"The video scaling information. :param height: The desired output", "{ 'value': {'key': 'value', 'type': '[PrivateLinkResource]'}, } def __init__( self,", ":type id: str :param identity: A managed identity that Video", "id. :vartype group_id: str :ivar required_members: The private link resource", "self.mode = kwargs.get('mode', None) class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): \"\"\"A sequence of absolute", "and not this class directly. Known sub-classes are: TlsEndpoint, UnsecuredEndpoint.", "through Azure Video Analyzer Player Widget or compatible players. Exported", "video and audio to be captured, optionally archived, and published", "information. :vartype system_data: ~video_analyzer.models.SystemData :param role: Defines the access level", "in 30 seconds increments. 
:type segment_length: str \"\"\" _attribute_map =", "self.description = kwargs.get('description', None) self.state = None self.expiration = None", "resolution preview image URL. :type large: str \"\"\" _attribute_map =", "sources nodes such as an RTSP source which allows for", "temporarily from the camera. It will retry to re-establish connection", "mode to be applied. Default mode is 'Pad'. If the", "'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key':", "'tier': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name',", "'type': '[LogSpecification]'}, 'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, } def __init__(", "to be used: RS256, RS384 or RS512. Possible values include:", "str \"\"\" _attribute_map = { 'title': {'key': 'title', 'type': 'str'},", "token blob to be provided to the Azure Video Analyzer", ":ivar name: The name of the pipeline job operation. :vartype", "bitrate_kbps: str :param frame_rate: The frame rate (in frames per", "str :param device_id: Required. The IoT device id to use", "should be encoded (2-channel stereo audio at a sampling rate", "type: The resource type. :type type: str \"\"\" _attribute_map =", "of the current node. :type node_name: str \"\"\" _validation =", "None) class VideoContentToken(msrest.serialization.Model): \"\"\"\"Video content token grants access to the", "values for it to be valid. :type claims: list[~video_analyzer.models.TokenClaim] :param", "'type': {'readonly': True}, 'system_data': {'readonly': True}, 'location': {'required': True}, }", "than or equal to 300. 
If omitted, the encoder uses", "'GroupLevelAccessControl'}, 'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'}, 'consumption': {'key': 'consumption', 'type':", "When set to 'true' causes the certificate chain trust validation", "URL as the value for the \"token\" query string parameter.", "= { 'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'} } def __init__(", "Possible values include: \"Internal\". :type action_type: str or ~video_analyzer.models.ActionType \"\"\"", "private link resources. :type value: list[~video_analyzer.models.PrivateLinkResource] \"\"\" _attribute_map = {", "send to Azure. :ivar id: Fully qualified resource ID for", "the mode is 'Pad' or 'Stretch' then both width and", "ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption: Public network access for consumption group.", "48 kHz). Allowed values are 96, 112, 128, 160, 192,", "self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints =", "The sequence of datetime ranges. Example: '[[\"2021-10-05T03:30:00Z\", \"2021-10-05T03:40:00Z\"]]'. :type ranges:", "self.iot_hub_name = kwargs['iot_hub_name'] self.device_id = kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model): \"\"\"The service", "} def __init__( self, **kwargs ): super(LivePipeline, self).__init__(**kwargs) self.topology_name =", "super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available = kwargs.get('name_available', None) self.reason = kwargs.get('reason', None)", "type: str self.video_name = kwargs['video_name'] self.video_creation_properties = kwargs.get('video_creation_properties', None) self.video_publishing_options", "video. :type video_encoder: ~video_analyzer.models.VideoEncoderBase \"\"\" _validation = { 'type': {'required':", "status of an operation on the pipeline job. 
Variables are", "camera and archives the content can be reused across many", "{'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'tunnel':", "kwargs.get('value', None) class VideoAnalyzerIdentity(msrest.serialization.Model): \"\"\"The managed identity for the Video", ":type device_id: str \"\"\" _validation = { 'type': {'required': True},", "to the next page of the collection (when the collection", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str self.preset = kwargs['preset']", "= kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.state = None", "this class directly. Known sub-classes are: UsernamePasswordCredentials. All required parameters", "mode: Describes the video scaling mode to be applied. Default", "for the live pipeline. The allowed range is from 500", "'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'blob_duration': {'key': 'blobDuration',", "def __init__( self, **kwargs ): super(MetricSpecification, self).__init__(**kwargs) self.name = None", "through long lived HTTP connections, and the RTP packages are", "self.id = None self.name = None self.type = None self.system_data", ":type name: str :param display: The operation display name. :type", "'alg', 'type': 'str'}, 'x': {'key': 'x', 'type': 'str'}, 'y': {'key':", "Video retention period indicates the maximum age of the video", "operation. All required parameters must be populated in order to", "in transit). All required parameters must be populated in order", "used in conjunction with the video content authorization token to", "= kwargs['is_in_use'] class VideoMediaInfo(msrest.serialization.Model): \"\"\"Contains information about the video and", ":type message: str \"\"\" _attribute_map = { 'code': {'key': 'code',", "\"\"\" _validation = { 'type': {'readonly': True}, 'info': {'readonly': True},", "list[str] :param audiences: List of expected token audiences. 
Token audience", "audio encoder presets, which define the recipe or instructions on", "LogSpecification(msrest.serialization.Model): \"\"\"A diagnostic log emitted by service. Variables are only", "of the connection. :type description: str :param actions_required: A message", "\"InProgress\", \"Succeeded\". :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private", "the topology to be used as inputs for this node.", "be ignored when sending a request. :ivar service_specification: The service", "values, such as individual cameras' RTSP endpoints and credentials. Overall", "content from a RTSP camera and archives the content can", "video MP4 file. The resulting MP4 file can be played", "the archive. Default value is 30 seconds. This property is", "~video_analyzer.models.ParameterType :param description: Description of the parameter. :type description: str", "= kwargs['has_data'] self.is_in_use = kwargs['is_in_use'] class VideoMediaInfo(msrest.serialization.Model): \"\"\"Contains information about", "The value must be greater than zero, and less than", "minutes, in 30 seconds increments. :type segment_length: str \"\"\" _attribute_map", "can be streamed. Only \"archive\" type videos can be streamed.", "user assigned managed identity used by the Video Analyzer resource.", "of datetime ranges as a string. You probably want to", "type: str :param name: Required. Node name. Must be unique", "{'key': 'consumption', 'type': 'GroupLevelAccessControl'}, } def __init__( self, **kwargs ):", "self.name = kwargs['name'] class ProcessorNodeBase(NodeBase): \"\"\"Base class for topology processor", "sending a request. :ivar name: The diagnostic log category name.", "self).__init__(**kwargs) self.edge_module_id = None class EdgeModuleEntityCollection(msrest.serialization.Model): \"\"\"A collection of EdgeModuleEntity", "'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map =", "items. 
:param value: A collection of Operation items. :type value:", "'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs)", "encoded video. The value must be greater than zero, and", "tunnel securely established using IoT Hub device information. All required", "of the private endpoint connection resource. Possible values include: \"Succeeded\",", ":type media_info: ~video_analyzer.models.VideoMediaInfo :param archival: Video archival properties. :type archival:", "None) self.processors = kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class", "The source MDM account. :vartype source_mdm_account: str :ivar source_mdm_namespace: The", "recipe or instructions on how the input video should be", "encryption keys in Key Vault. Variables are only populated by", "True}, 'to_be_exported_for_shoebox': {'readonly': True}, } _attribute_map = { 'name': {'key':", "True}, 'blob_duration': {'readonly': True}, } _attribute_map = { 'name': {'key':", "**kwargs ): super(LivePipelineCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link =", "type for all audio encoder presets, which define the recipe", "resource. Possible values include: \"Succeeded\", \"Creating\", \"Deleting\", \"Failed\". :vartype provisioning_state:", "= kwargs['alg'] self.n = kwargs['n'] self.e = kwargs['e'] class SourceNodeBase(NodeBase):", "This property is only allowed for topologies where \"kind\" is", "older than 30 days will be periodically deleted. This value", "type is 'file' and video file is available for consumption.", "'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, 'target': {'key': 'target',", "'str'}, } def __init__( self, **kwargs ): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname", "ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All required parameters must be populated in", "List of the topology source nodes. 
Source nodes enable external", "private link resource. Variables are only populated by the server,", "access to the video content URLs.\". Variables are only populated", "str class UserAssignedManagedIdentity(msrest.serialization.Model): \"\"\"The details of the user assigned managed", "self, **kwargs ): super(SinkNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type:", "endpoints: The endpoints associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint]", "topology and can optionally have default values to be used", "'properties.title', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'type_properties_type': {'key':", "= None # type: Optional[str] class CertificateSource(msrest.serialization.Model): \"\"\"Base class for", "Required. Password to be presented as part of the credentials.", "input audio is used. :type bitrate_kbps: str \"\"\" _validation =", "'VideoEncoderBase'}, } def __init__( self, **kwargs ): super(EncoderCustomPreset, self).__init__(**kwargs) self.type", "topology parameters for more information. All required parameters must be", "super(AudioEncoderBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.bitrate_kbps =", "Properties(msrest.serialization.Model): \"\"\"Metric properties. Variables are only populated by the server,", "Analyzer account. 
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'id':", "Azure Video Analyzer's list of trusted authorities should be used.", "'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus,", "'name', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource',", "'type': '[ErrorAdditionalInfo]'}, } def __init__( self, **kwargs ): super(ErrorDetail, self).__init__(**kwargs)", "= { 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} } def __init__( self, **kwargs", "parameters with a default value can be optionally be overridden.", "value: A collection of PipelineTopology items. :type value: list[~video_analyzer.models.PipelineTopology] :param", "'unit': {'readonly': True}, 'aggregation_type': {'readonly': True}, 'lock_aggregation_type': {'readonly': True}, 'dimensions':", "'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'},", "\"\"\" _validation = { 'name': {'required': True}, 'type': {'required': True},", "for example, when the topology is used only for archiving", ":type sku: ~video_analyzer.models.Sku :param description: An optional description of the", "authorization token on any compatible DASH or HLS players by", "kwargs.get('title', None) self.description = kwargs.get('description', None) self.type_properties_type = None self.flags", "and a live, low-latency feed is available from the source.", "have been declared in the referenced topology. Topology parameters without", "a Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage). 
:type id:", "provisioning token allows for a single instance of Azure Video", "kwargs['is_in_use'] class VideoMediaInfo(msrest.serialization.Model): \"\"\"Contains information about the video and audio", "'type': 'str'}, 'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'}, 'error': {'key': 'properties.error',", "'type': '[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'}, 'encryption': {'key': 'properties.encryption',", "the authentication rules, and control access to specific video resources.", "'properties.error', 'type': 'PipelineJobError'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def", "kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): \"\"\"Status of", "methods. You probably want to use the sub-classes and not", "class LivePipeline(ProxyResource): \"\"\"Live pipeline represents a unique instance of a", "last modification of the resource. :param created_by: The identity that", "VideoEntity(ProxyResource): \"\"\"Represents a video resource within Azure Video Analyzer. Videos", "'UnsecuredEndpoint'} } def __init__( self, **kwargs ): super(EndpointBase, self).__init__(**kwargs) self.type", ":type type: str \"\"\" _attribute_map = { 'name': {'key': 'name',", "error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name': {'readonly': True}, 'status':", "The token is specific to a single video. :vartype token:", "and will be lost if the code is regenerated. #", "Declared parameters can and must be referenced throughout the topology", "self).__init__(**kwargs) self.expiration_date = None self.token = None class EncoderPresetBase(msrest.serialization.Model): \"\"\"Base", "Whether or not public network access is allowed for specified", "when the video type is 'archive' and a live, low-latency", ":param audiences: List of expected token audiences. 
Token audience is", "title provided by the user. Value can be up to", "resource identifier. :type id: str :param identity: Required. The IoT", "which allow for a topology to be parameterized. This allows", "send to Azure. :param type: Required. The discriminator for derived", ":param name: Required. Name of the claim which must be", "A collection of information about the state of the connection", "An array of upstream node references within the topology to", "} def __init__( self, **kwargs ): super(ParameterDefinition, self).__init__(**kwargs) self.name =", "__init__( self, **kwargs ): super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "be validated. Token must contains all claims and respective values", "\"ES256\", \"ES384\", \"ES512\". :type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo :param x:", "default: str \"\"\" _validation = { 'name': {'required': True}, 'type':", "class ParameterDeclaration(msrest.serialization.Model): \"\"\"Single topology parameter declaration. Declared parameters can and", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'iot_hub_name': {'key':", "'credentials': {'required': True}, 'url': {'required': True}, } _attribute_map = {", "None) self.sku = kwargs.get('sku', None) self.description = kwargs.get('description', None) self.parameters", "self.sku = kwargs.get('sku', None) self.description = kwargs.get('description', None) self.parameters =", "topology nodes through the use of \"${PARAMETER_NAME}\" string pattern. Parameters", "super(AudioEncoderAac, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str class AuthenticationBase(msrest.serialization.Model):", "'str'}, 'ranges': {'key': 'ranges', 'type': 'str'}, } def __init__( self,", "flags: Video flags contain information about the available video actions", "'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs", "set to \"live\". 
:type segment_length: str :param retention_period: Video retention", "pipeline job operation. :vartype name: str :ivar status: The status", "include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\". :type created_by_type: str or ~video_analyzer.models.CreatedByType", "exported. :type sinks: list[~video_analyzer.models.SinkNodeBase] \"\"\" _validation = { 'id': {'readonly':", "that can be references across the topology nodes. * Sources:", ":type name: str \"\"\" _validation = { 'type': {'required': True},", "~video_analyzer.models.SkuTier \"\"\" _validation = { 'name': {'required': True}, 'tier': {'readonly':", "information. :vartype system_data: ~video_analyzer.models.SystemData \"\"\" _validation = { 'id': {'readonly':", "): super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications = None self.metric_specifications = None class", "sub-classes and not this class directly. Known sub-classes are: SecureIotDeviceRemoteTunnel.", "Resource Manager APIs to return error details for failed operations.", "30 seconds to 5 minutes, in 30 seconds increments. Changing", "{'key': 'large', 'type': 'str'}, } def __init__( self, **kwargs ):", "are enabled. :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls \"\"\" _attribute_map = { 'download_url':", "KeyVaultProperties(msrest.serialization.Model): \"\"\"The details for accessing the encryption keys in Key", ":type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed reason why", "Required. An array of upstream node references within the topology", "archive_base_url: Video archive streaming base URL. The archived content can", "): super(VideoPreviewImageUrls, self).__init__(**kwargs) self.small = kwargs.get('small', None) self.medium = kwargs.get('medium',", ":param type: Required. The identity type. :type type: str :param", "content files (segments) which are persisted to storage. Smaller segments", "a request. 
:ivar expiration_date: The content token expiration date in", "'type': 'Properties'}, 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, 'action_type': {'key': 'actionType',", "True}, 'status': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map =", "token value to be added to the video content URL", "a default value can be optionally be overridden. :type parameters:", "{'key': 'properties.sinks', 'type': '[SinkNodeBase]'}, } def __init__( self, **kwargs ):", "Supported aggregation types. :type supported_aggregation_types: list[str] :ivar dimensions: The metric", "'properties.expiration', 'type': 'iso-8601'}, 'error': {'key': 'properties.error', 'type': 'PipelineJobError'}, 'parameters': {'key':", "the pipeline job operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation =", "resources. :type value: list[~video_analyzer.models.PrivateLinkResource] \"\"\" _attribute_map = { 'value': {'key':", "If omitted, encoder sets it automatically to try and match", "'str'}, } def __init__( self, **kwargs ): super(EncoderSystemPreset, self).__init__(**kwargs) self.type", "the credentials. :type username: str :param password: Required. Password to", "class EncoderProcessor(ProcessorNodeBase): \"\"\"Encoder processor allows for encoding of the input", "between 1 day to 10 years, in 1 day increments.", ":type retention_period: str \"\"\" _attribute_map = { 'title': {'key': 'title',", "Required. PEM formatted public certificates. One certificate per entry. :type", "self).__init__(**kwargs) self.group_id = None self.required_members = None self.required_zone_names = kwargs.get('required_zone_names',", "Operation(msrest.serialization.Model): \"\"\"An operation. 
All required parameters must be populated in", "'properties.type', 'type': 'str'}, 'flags': {'key': 'properties.flags', 'type': 'VideoFlags'}, 'content_urls': {'key':", "'group_id': {'readonly': True}, 'required_members': {'readonly': True}, } _attribute_map = {", "Hub resource identifier. :type id: str :param identity: Required. The", "or ~video_analyzer.models.Kind :param sku: Describes the properties of a SKU.", "properties for tokens generated with Elliptical Curve algorithm. All required", ":param last_modified_by_type: The type of identity that last modified the", "self, **kwargs ): super(PipelineJobUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description", "= None self.token = None class EncoderPresetBase(msrest.serialization.Model): \"\"\"Base type for", "managed identity's resource identifier to use when accessing a resource.", "securely established using IoT Hub device information. All required parameters", "provided by the user. Value can be up to 2048", "be ignored when sending a request. :ivar code: The error", "'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipeline,", "'type': 'str'}, 'mode': {'key': 'mode', 'type': 'str'}, } def __init__(", "sub-classes and not this class directly. Known sub-classes are: ProcessorNodeBase,", "None self.error = None self.parameters = kwargs.get('parameters', None) class PipelineTopology(ProxyResource):", "or instructions on how audio should be processed. You probably", "The error details. :vartype details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The error", "self, **kwargs ): super(UserAssignedManagedIdentity, self).__init__(**kwargs) self.client_id = None self.principal_id =", "content. Variables are only populated by the server, and will", "video type is 'file' and video file is available for", "should be encoded. If omitted, encoder sets it automatically to", "items. 
:type value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A link to the", "and will be ignored when sending a request. :ivar code:", "is currently being referenced be an active pipeline. The fact", "'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'}, } def __init__( self, **kwargs", "is an optional property, typically used when the endpoint is", "in ISO8601 duration format (i.e. \"P1D\" equals 1 day) and", "does not affect other live pipelines in your account. :type", "'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__(", "to use when accessing a resource. :type user_assigned_identity: str \"\"\"", ":ivar edge_module_id: Internal ID generated for the instance of the", "be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url:", "a pipeline topology which captures content from a RTSP camera", "One certificate per entry. :type certificates: list[str] \"\"\" _validation =", "be referenced throughout the topology and can optionally have default", "self.type = None # type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None)", "'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'}, } def __init__( self, **kwargs", "\"\"\"Base class for certificate sources. You probably want to use", "# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All", "derived types.Constant filled by server. :type type: str :param kid:", "node. :type node_name: str \"\"\" _validation = { 'node_name': {'required':", "self.expiration_date = kwargs['expiration_date'] class LivePipeline(ProxyResource): \"\"\"Live pipeline represents a unique", "self.identity = kwargs.get('identity', None) self.status = None class AudioEncoderBase(msrest.serialization.Model): \"\"\"Base", "increments. 
Changing this value after the initial call to create", "str :param e: Required. RSA public key exponent. :type e:", "be kept in storage. It must be provided in the", "= kwargs.get('public_network_access', None) class IotHub(msrest.serialization.Model): \"\"\"The IoT Hub details. Variables", "'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation',", "'type': '[PrivateLinkResource]'}, } def __init__( self, **kwargs ): super(PrivateLinkResourceListResult, self).__init__(**kwargs)", "be unique within the topology. :type name: str \"\"\" _validation", "_validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'display_description':", ":param consumption: Public network access for consumption group. :type consumption:", "'description': {'key': 'properties.description', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},", "'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps',", "to be presented as part of the credentials. It is", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param role: Defines the", ":type y: str \"\"\" _validation = { 'type': {'required': True},", "preset: Required. The encoder preset, which defines the recipe or", "'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'actions_required': {'key': 'actionsRequired',", "): super(UnsecuredEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str class", "The error message. 
:vartype message: str :ivar target: The error", "**kwargs ): super(VideoContentToken, self).__init__(**kwargs) self.expiration_date = None self.token = None", "'[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'}, 'processors': {'key': 'properties.processors', 'type':", "generated for the instance of the Video Analyzer edge module.", "} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'id':", "the pipeline can connect to over TLS transport (data is", "kilobits per second or Kbps, at which video should be", "__init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = kwargs.get('provider', None)", "for derived types.Constant filled by server. :type type: str \"\"\"", ":type has_data: bool :param is_in_use: Required. Value indicating whether or", "True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key':", "regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError import msrest.serialization class", "to return in one response). :type next_link: str \"\"\" _attribute_map", "__init__( self, **kwargs ): super(SystemData, self).__init__(**kwargs) self.created_by = kwargs.get('created_by', None)", "data to be ingested by the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase]", "applied on this specific pipeline. :type value: str \"\"\" _validation", "after the initial call to create the video resource can", "info type. :vartype type: str :ivar info: The additional info.", "): super(VideoEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link',", "filled by server. 
:type type: str :param bitrate_kbps: Bitrate, in", "'kid', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey',", "last_modified_by_type: str or ~video_analyzer.models.CreatedByType :param last_modified_at: The timestamp of resource", "additional token claims to be validated. Token must contains all", "_attribute_map = { 'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'}, } def", "last modified the resource. Possible values include: \"User\", \"Application\", \"ManagedIdentity\",", "small: Low resolution preview image URL. :type small: str :param", "None) class PrivateEndpoint(msrest.serialization.Model): \"\"\"The Private Endpoint resource. Variables are only", "'type': 'str'}, 'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState',", "utilized by the RTSP and RTP exchange: TCP or HTTP.", "the Azure IoT Edge module twin properties. :vartype token: str", "'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly': True}, }", "the cloud. A new provisioning token can be generated for", "export metric to shoebox. :vartype to_be_exported_for_shoebox: bool \"\"\" _validation =", "= kwargs.get('tunnel', None) class ErrorAdditionalInfo(msrest.serialization.Model): \"\"\"The resource management error additional", "kind: str or ~video_analyzer.models.Kind :param sku: Describes the properties of", "{ 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__( self, **kwargs ):", "VideoEntity items. :param value: A collection of VideoEntity items. 
:type", "'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'kind':", "list[~video_analyzer.models.ErrorAdditionalInfo] \"\"\" _validation = { 'code': {'readonly': True}, 'message': {'readonly':", "self.status = None class SystemData(msrest.serialization.Model): \"\"\"Metadata pertaining to creation and", "= kwargs.get('video_publishing_options', None) class VideoSource(SourceNodeBase): \"\"\"Video source allows for content", "'file' and video file is available for consumption. :type download_url:", "None) class PipelineJobError(msrest.serialization.Model): \"\"\"Details about the error for a failed", "'type': 'str'}, 'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'}, } def __init__(", "): super(PipelineJobCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link',", ":type value: list[~video_analyzer.models.LivePipeline] :param next_link: A link to the next", "self).__init__(**kwargs) self.value = kwargs.get('value', None) class OperationDisplay(msrest.serialization.Model): \"\"\"Operation details. :param", "'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, 'trusted_certificates': {'key': 'trustedCertificates',", "Else if the mode is 'PreserveAspectRatio' then only one of", "**kwargs ): super(VideoEncoderBase, self).__init__(**kwargs) self.type = None # type: Optional[str]", "connect to over clear transport (no encryption in transit). All", "'actionType', 'type': 'str'}, } def __init__( self, **kwargs ): super(Operation,", "identity that Video Analyzer will use to access the storage", "the specified storage account. :param value: Array of private endpoint", "**kwargs ): super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name = None self.status = None", "by this policy. Possible values include: \"Reader\". 
:type role: str", "_attribute_map = { 'key_identifier': {'key': 'keyIdentifier', 'type': 'str'}, 'current_key_identifier': {'key':", "{'readonly': True}, 'to_be_exported_for_shoebox': {'readonly': True}, } _attribute_map = { 'name':", "} def __init__( self, **kwargs ): super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name =", "the video can be streamed. Only \"archive\" type videos can", "ErrorDetail(msrest.serialization.Model): \"\"\"The error detail. Variables are only populated by the", "resource model definition for a Azure Resource Manager proxy resource.", "= None # type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) class", "reference to an existing pipeline topology defined for real-time content", "IoT edge module and the cloud. After the initial handshake,", "id: str :param identity: A managed identity that Video Analyzer", "self.ignore_hostname = kwargs.get('ignore_hostname', None) self.ignore_signature = kwargs.get('ignore_signature', None) class TokenClaim(msrest.serialization.Model):", "video sink publishes content via the video resource. This property", ":type url: str :param tunnel: Describes the tunnel through which", ":param private_link_service_connection_state: A collection of information about the state of", "topology sink nodes. Sink nodes allow pipeline data to be", "authorization token to download the most recent still image from", "{'readonly': True}, 'kind': {'required': True}, 'sku': {'required': True}, } _attribute_map", "is recommended that this value is parameterized as a secret", "and not this class directly. Known sub-classes are: ProcessorNodeBase, SinkNodeBase,", "nodes through the use of \"${PARAMETER_NAME}\" string pattern. 
Parameters can", ":vartype to_be_exported_for_shoebox: bool \"\"\" _validation = { 'name': {'readonly': True},", "True}, 'additional_info': {'readonly': True}, } _attribute_map = { 'code': {'key':", "populated by the server, and will be ignored when sending", "why the given name is available. :type message: str \"\"\"", "{ 'type': {'key': '@type', 'type': 'str'}, 'certificates': {'key': 'certificates', 'type':", "necessarily indicate that data is being received. For example, video", ":type description: str \"\"\" _attribute_map = { 'provider': {'key': 'provider',", "kwargs['x'] self.y = kwargs['y'] class EdgeModuleEntity(ProxyResource): \"\"\"The representation of an", "be greater than zero, and less than or equal to", "the endpoint URL. This is an optional property, typically used", "'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, } def", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} } def __init__( self,", "'audioEncoder', 'type': 'AudioEncoderBase'}, 'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'}, } def", ":param key_identifier: Required. The URL of the Key Vault key", "characteristics. For instance, a pipeline topology which captures content from", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username = kwargs['username']", "None) class OperationDisplay(msrest.serialization.Model): \"\"\"Operation details. :param provider: The service provider.", "'system_data': {'readonly': True}, 'kind': {'required': True}, 'sku': {'required': True}, }", "'type': 'SystemData'}, 'group_id': {'key': 'properties.groupId', 'type': 'str'}, 'required_members': {'key': 'properties.requiredMembers',", "resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. 
:vartype id: str :ivar name: The", "super(VideoSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str self.video_name =", "appending the following to the base URL: .. code-block:: -", "'value', 'type': 'str'}, } def __init__( self, **kwargs ): super(ParameterDefinition,", "username: Required. Username to be presented as part of the", "__init__( self, **kwargs ): super(CredentialsBase, self).__init__(**kwargs) self.type = None #", "include: \"Standard\". :vartype tier: str or ~video_analyzer.models.SkuTier \"\"\" _validation =", "kwargs.get('consumption', None) class NodeInput(msrest.serialization.Model): \"\"\"Describes an input signal to be", "segment_length: Segment length indicates the length of individual content files", "This string is case-sensitive. :type device_id: str \"\"\" _validation =", "skipped. Default is 'false'. :type ignore_hostname: str :param ignore_signature: When", "that last modified the resource. Possible values include: \"User\", \"Application\",", "None) self.next_link = kwargs.get('next_link', None) class LivePipelineOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking", ":vartype name: str :ivar display_name: The metric display name. :vartype", "intended to be kept in storage. It must be provided", "= kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class AccountEncryption(msrest.serialization.Model): \"\"\"Defines", "The metric dimensions. :vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates whether", "(This also follows the OData error response format.). :param error:", "'iotHubName', 'type': 'str'}, 'device_id': {'key': 'deviceId', 'type': 'str'}, } def", "aggregation type. Possible values include: \"Average\", \"Count\", \"Total\". 
:vartype lock_aggregation_type:", "{'key': 'integration', 'type': 'GroupLevelAccessControl'}, 'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'}, 'consumption':", ":param type: Required. The type of the endpoint. Possible values", "= kwargs.get('parameters', None) class PipelineJobCollection(msrest.serialization.Model): \"\"\"A collection of PipelineJob items.", "disconnect temporarily from the camera. It will retry to re-establish", "Name of the claim which must be present on the", "EdgeModuleEntity items. :param value: A collection of EdgeModuleEntity items. :type", "string parameter. The token is specific to a single video.", "'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(ErrorResponse,", "'state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", "'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def __init__( self, **kwargs", "with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account", "user_assigned_identity: Required. The user assigned managed identity's resource identifier to", "kwargs['id'] self.identity = kwargs['identity'] self.status = None class JwtAuthentication(AuthenticationBase): \"\"\"Properties", "items. :type value: list[~video_analyzer.models.PipelineJob] :param next_link: A link to the", "containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param title:", "= kwargs.get('default', None) class ParameterDefinition(msrest.serialization.Model): \"\"\"Defines the parameter value of", "resource of type 'file'. All required parameters must be populated", "existing pipeline topology defined for real-time content processing. When activated,", "the camera bitrate is now below the reserved capacity. Doing", "streamed. Only \"archive\" type videos can be streamed. :type can_stream:", "str or ~video_analyzer.models.AccessPolicyEccAlgo :param x: Required. X coordinate. 
:type x:", "RTSP stream. It is available when the video type is", "of the input content. For example, it can used to", "title: Optional video title provided by the user. Value can", "= kwargs.get('download_url', None) self.archive_base_url = kwargs.get('archive_base_url', None) self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url',", "\"\"\"The check availability result. :param name_available: Indicates if the resource", "**kwargs ): super(AccountEncryption, self).__init__(**kwargs) self.type = kwargs['type'] self.key_vault_properties = kwargs.get('key_vault_properties',", "to Azure. :ivar id: Fully qualified resource ID for the", "resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for", "token is specific to a single video. :vartype token: str", "topology should be defined according to the scenario to be", "'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'}, } def __init__( self, **kwargs", "{'key': 'videoName', 'type': 'str'}, 'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'}, 'video_publishing_options':", "when sending a request. All required parameters must be populated", "'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'}, 'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'},", "'[PipelineTopology]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self,", "be optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] \"\"\" _validation =", "None) class Properties(msrest.serialization.Model): \"\"\"Metric properties. 
Variables are only populated by", "AccessPolicyEntity(ProxyResource): \"\"\"Access policies help define the authentication rules, and control", "'type': 'str'}, 'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'}, } def __init__(", "super(PipelineJobUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None)", "the Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The", "identity: A managed identity that Video Analyzer will use to", "account. Possible values include: \"Enabled\", \"Disabled\". :type public_network_access: str or", "next_link: A link to the next page of the collection", "kwargs['username'] self.password = kwargs['password'] class VideoAnalyzer(TrackedResource): \"\"\"The Video Analyzer account.", "\"\"\"Key properties for JWT token validation. You probably want to", "The reason for approval/rejection of the connection. :type description: str", "a request. :ivar name: The metric name. :vartype name: str", "kwargs.get('tags', None) self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None)", "{'key': 'default', 'type': 'str'}, } def __init__( self, **kwargs ):", "values. :type audiences: list[str] :param claims: List of additional token", "kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class PrivateEndpoint(msrest.serialization.Model): \"\"\"The Private", "True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'},", "name is available. :type name_available: bool :param reason: The reason", "video_encoder: Describes a custom preset for encoding video. 
:type video_encoder:", "'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def", "'#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str self.alg = kwargs['alg'] self.n = kwargs['n']", "'str'}, 'name': {'key': 'name', 'type': 'str'}, } _subtype_map = {", "~video_analyzer.models.VideoScale \"\"\" _validation = { 'type': {'required': True}, } _attribute_map", "sub-classes and not this class directly. Known sub-classes are: AudioEncoderAac.", "~video_analyzer.models.RtspTransport :param endpoint: Required. RTSP endpoint information for Video Analyzer", "= kwargs['node_name'] class Operation(msrest.serialization.Model): \"\"\"An operation. All required parameters must", "-------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError import msrest.serialization class Resource(msrest.serialization.Model): \"\"\"Common", "that one 'noisy neighbor' does not affect other live pipelines", "= kwargs.get('scale', None) class VideoEncoderH264(VideoEncoderBase): \"\"\"A custom preset for encoding", ":param value: A collection of VideoAnalyzer items. :type value: list[~video_analyzer.models.VideoAnalyzer]", "'type': 'str'}, 'display_description': {'key': 'displayDescription', 'type': 'str'}, 'unit': {'key': 'unit',", "validated. Token must contains all claims and respective values for", "the AAC codec. All required parameters must be populated in", "contain information about the available video actions and its dynamic", "order to send to Azure. :param user_assigned_identity: Required. The user", ":ivar error: The error details for the live pipeline operation.", "be used. 
:type trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options: Validation options to", "\"\"\" _validation = { 'type': {'required': True}, 'credentials': {'required': True},", "'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntity, self).__init__(**kwargs)", "client_id: str :ivar principal_id: The principal ID. :vartype principal_id: str", "is case-sensitive. :type device_id: str \"\"\" _validation = { 'type':", "videos have this value set to false. :type has_data: bool", ":vartype system_data: ~video_analyzer.models.SystemData :param role: Defines the access level granted", "be defined. Topology parameters with a default value can be", "'name', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def", "= { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps',", ":type n: str :param e: Required. RSA public key exponent.", "the status of an operation on the live pipeline. Variables", "Variables are only populated by the server, and will be", "= { 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__(", "description: An optional description for the pipeline. :type description: str", "to Azure. :param name: Required. The SKU name. Possible values", "of TLS endpoints. :param ignore_hostname: When set to 'true' causes", "as individual cameras' RTSP endpoints and credentials. 
Overall a topology", "{'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, }", "__init__( self, **kwargs ): super(TlsEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' #", "} _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'identity':", "__init__( self, **kwargs ): super(AccountEncryption, self).__init__(**kwargs) self.type = kwargs['type'] self.key_vault_properties", "all Azure Resource Manager APIs to return error details for", "self).__init__(**kwargs) self.name = kwargs['name'] self.display = kwargs.get('display', None) self.origin =", "values include: \"Inactive\", \"Activating\", \"Active\", \"Deactivating\". :vartype state: str or", "{'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'edge_module_id':", "of the ranges should add up to 24 hours or", "topology is used only for low latency video streaming. Default", "for access validation based on JSON Web Tokens (JWT). All", ":ivar tier: The SKU tier. Possible values include: \"Standard\". :vartype", "{ 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type':", "'status': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map = {", "None class MetricSpecification(msrest.serialization.Model): \"\"\"A metric emitted by service. Variables are", "sub-classes and not this class directly. Known sub-classes are: JwtAuthentication.", "class PipelineTopologyUpdate(ProxyResource): \"\"\"Pipeline topology describes the processing steps to be", "Video Analyzer relies on tables, queues, and blobs. The primary", "access validation based on JSON Web Tokens (JWT). All required", "string. 
The datetime values should follow IS08601, and the sum", "= { 'type': {'required': True}, } _attribute_map = { 'endpoint_url':", "/manifest(format=m3u8-aapl).m3u8 - HLS CMAF: /manifest(format=m3u8-cmaf) - DASH CMAF: /manifest(format=mpd-time-cmaf) Moreover,", ":type name: str :param transport: Network transport utilized by the", "{'key': 'hasData', 'type': 'bool'}, 'is_in_use': {'key': 'isInUse', 'type': 'bool'}, }", "sub-classes are: EncoderCustomPreset, EncoderSystemPreset. All required parameters must be populated", "Required. RTSP endpoint information for Video Analyzer to connect to.", "str :param id: Operation resource ID. :type id: str :param", "received. For example, video recording may be gated on events", "The identity type. :type type: str :param user_assigned_identities: The User", "None) self.state = None self.parameters = kwargs.get('parameters', None) class LogSpecification(msrest.serialization.Model):", "processed or transformed. :type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of", "topology to be parameterized. This allows individual pipelines refer to", "to 5 minutes, in 30 seconds increments. Changing this value", "'type': 'str'}, } def __init__( self, **kwargs ): super(VideoEntityCollection, self).__init__(**kwargs)", "def __init__( self, **kwargs ): super(TokenClaim, self).__init__(**kwargs) self.name = kwargs['name']", "= { 'type': {'required': True}, 'kid': {'required': True}, } _attribute_map", "sending a request. :ivar type: The additional info type. :vartype", "is 30 seconds. This property is only allowed for topologies", "connection. When using HTTP, the RTSP messages are exchanged through", "in order to send to Azure. :param can_stream: Required. Value", "errors when uploading content to the archive. Default value is", "the topology is used only for low latency video streaming.", "order to send to Azure. :param name: Required. 
The SKU", "video encoding presets, which define the recipe or instructions on", "ranges. The video source only picks up recorded media within", "the amount of storage transactions while increasing the archive playback", "when sending a request. :ivar name: The diagnostic log category", "reason for approval/rejection of the connection. :type description: str :param", "URL of the Key Vault key used to encrypt the", "desired output video width. :type width: str :param mode: Describes", "'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } def __init__( self,", "= None class MetricSpecification(msrest.serialization.Model): \"\"\"A metric emitted by service. Variables", "The expiration date of the registration token. The Azure Video", "self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username = kwargs['username'] self.password", "a pipeline. Currently supported only with batch pipelines. All required", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param private_endpoint: The resource", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} } def __init__( self,", "a request. :ivar name: The name of the live pipeline", "RTSP cameras through live pipelines or can be created by", "alg: Required. RSA algorithm to be used: RS256, RS384 or", ":ivar name: The name of the live pipeline operation. :vartype", "Required. The type of key used to encrypt the Account", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase',", "'type': 'str'}, 'end_time': {'key': 'endTime', 'type': 'str'}, 'status': {'key': 'status',", "str :param preview_image_urls: Video preview image URLs. These URLs can", "the certificate chain trust validation to be skipped. Default is", "be present on the token. 
:type value: str \"\"\" _validation", "= { 'type': {'key': '@type', 'type': 'str'}, 'certificates': {'key': 'certificates',", "keys are looked up based on the key id present", "self).__init__(**kwargs) self.expiration_date = None self.token = None class VideoContentUrls(msrest.serialization.Model): \"\"\"Set", "'type': 'str'}, 'x': {'key': 'x', 'type': 'str'}, 'y': {'key': 'y',", "RTSP messages. Possible values include: \"Http\", \"Tcp\". :type transport: str", "increments of 100 Kbps. If the RTSP camera exceeds this", "class VideoScale(msrest.serialization.Model): \"\"\"The video scaling information. :param height: The desired", "parameters: List of the instance level parameter values for the", "def __init__( self, **kwargs ): super(Endpoint, self).__init__(**kwargs) self.endpoint_url = kwargs.get('endpoint_url',", "Token issuer is valid if it matches at least one", "call to create the video resource can lead to errors", "= { 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} } def __init__( self, **kwargs", "# type: str self.transport = kwargs.get('transport', None) self.endpoint = kwargs['endpoint']", "this class directly. Known sub-classes are: EncoderProcessor. All required parameters", "or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections created under Video", ":param description: An optional description of the pipeline topology. It", "VideoAnalyzerUpdate(msrest.serialization.Model): \"\"\"The update operation for a Video Analyzer account. Variables", "an ongoing video recording can be played in \"live mode\"", "storage. 
Smaller segments provide lower archive playback latency but generate", "~video_analyzer.models.SystemData :param title: Optional video title provided by the user.", "None # type: Optional[str] class CertificateSource(msrest.serialization.Model): \"\"\"Base class for certificate", "} def __init__( self, **kwargs ): super(EndpointBase, self).__init__(**kwargs) self.type =", "{ 'value': {'key': 'value', 'type': '[EdgeModuleEntity]'}, 'next_link': {'key': '@nextLink', 'type':", "'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, } def __init__( self, **kwargs", "of individual video files (segments) which are persisted to storage.", "can be reused across many pipeline instances which share the", "HLS players by appending the following to the base URL:", "not be accessible at the time. :type is_in_use: bool \"\"\"", "endpoint: ~video_analyzer.models.EndpointBase \"\"\" _validation = { 'type': {'required': True}, 'name':", "{'readonly': True}, 'blob_duration': {'readonly': True}, } _attribute_map = { 'name':", "\"archive\" type videos can be streamed. :type can_stream: bool :param", "tracking the status of an operation on the live pipeline.", "None class MetricDimension(msrest.serialization.Model): \"\"\"A metric dimension. Variables are only populated", "periodically connect to the cloud. A new provisioning token can", "self.status = kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model):", "'str'}, } def __init__( self, **kwargs ): super(AccountEncryption, self).__init__(**kwargs) self.type", "pipeline job. Variables are only populated by the server, and", "format in the granularity of days, up to a maximum", "{'key': 'value', 'type': '[PipelineJob]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, }", "across many pipeline instances which share the same processing characteristics.", "resource to be used as the source. :type video_name: str", "filled by server. 
:type type: str :param name: Required. Node", "used: ES256, ES384 or ES512. Possible values include: \"ES256\", \"ES384\",", "the video and audio content. :param segment_length: Video segment length", "): super(VideoSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str self.video_name", "type: str self.iot_hub_name = kwargs['iot_hub_name'] self.device_id = kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model):", "str or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The metric lock aggregation type.", "for endpoints. You probably want to use the sub-classes and", "None) class PrivateLinkResourceListResult(msrest.serialization.Model): \"\"\"A list of private link resources. :param", "for which availability needs to be checked. :type name: str", "up to 2048 characters long. :type description: str :ivar type_properties_type:", "} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display_name':", "Authentication method to be used when validating client API access.", "in order to send to Azure. :param name: Required. The", "The operation description. :type description: str \"\"\" _attribute_map = {", "= kwargs['type'] self.description = kwargs.get('description', None) self.default = kwargs.get('default', None)", "of PipelineJob items. :param value: A collection of PipelineJob items.", ":vartype provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState \"\"\" _validation = { 'id':", "**kwargs ): super(TlsEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str", "video resource to be used as the source. :type video_name:", "of an operation on the pipeline job. Variables are only", "request. :param tags: A set of tags. Resource tags. :type", "indicate that data is being received. 
For example, video recording", "self.group_id = None self.required_members = None self.required_zone_names = kwargs.get('required_zone_names', None)", "class directly. Known sub-classes are: EncoderProcessor. All required parameters must", "audio should be encoded (2-channel stereo audio at a sampling", "\"\"\"Provisioning token properties. A provisioning token allows for a single", "disable_archive: str :param disable_rtsp_publishing: When set to 'true' the RTSP", "'name': {'readonly': True}, 'display_name': {'readonly': True}, 'to_be_exported_for_shoebox': {'readonly': True}, }", "private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'id': {'readonly': True}, 'name':", "transport: Network transport utilized by the RTSP and RTP exchange:", "def __init__( self, **kwargs ): super(AccountEncryption, self).__init__(**kwargs) self.type = kwargs['type']", "processed. :type preset: ~video_analyzer.models.EncoderPresetBase \"\"\" _validation = { 'type': {'required':", "A collection of VideoEntity items. :type value: list[~video_analyzer.models.VideoEntity] :param next_link:", "~video_analyzer.models.SystemData :param kind: Topology kind. Possible values include: \"Live\", \"Batch\".", "or transformations. * Sinks: list of one or more data", "'str'}, 'tier': {'key': 'tier', 'type': 'str'}, } def __init__( self,", "): super(VideoContentToken, self).__init__(**kwargs) self.expiration_date = None self.token = None class", "Kbps reserved for the live pipeline. The allowed range is", "the endpoint is behind a firewall. 
:type tunnel: ~video_analyzer.models.TunnelBase :param", "self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname', None) self.ignore_signature = kwargs.get('ignore_signature', None) class", "'info', 'type': 'object'}, } def __init__( self, **kwargs ): super(ErrorAdditionalInfo,", "'str'}, 'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type':", "= { 'client_id': {'key': 'clientId', 'type': 'str'}, 'principal_id': {'key': 'principalId',", "which define the recipe or instructions on how audio should", "this class directly. Known sub-classes are: VideoSink. All required parameters", "'type': 'EndpointBase'}, } def __init__( self, **kwargs ): super(RtspSource, self).__init__(**kwargs)", "class Endpoint(msrest.serialization.Model): \"\"\"The endpoint details. All required parameters must be", "content should be processed. :type preset: ~video_analyzer.models.EncoderPresetBase \"\"\" _validation =", "True}, } _attribute_map = { 'tags': {'key': 'tags', 'type': '{str}'},", "__init__( self, **kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id", "'iso-8601'}, 'error': {'key': 'properties.error', 'type': 'PipelineJobError'}, 'parameters': {'key': 'properties.parameters', 'type':", "'[PipelineJob]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self,", "long. :type title: str :param description: Optional description provided by", "the Video Analyzer account. Possible values include: \"Enabled\", \"Disabled\". 
:type", "media from an RTSP camera or generic RTSP server to", "self.type = None # type: Optional[str] class EncoderCustomPreset(EncoderPresetBase): \"\"\"Describes a", "be referenced throughout the topology nodes through the use of", "'height': {'key': 'height', 'type': 'str'}, 'width': {'key': 'width', 'type': 'str'},", "None) class ParameterDeclaration(msrest.serialization.Model): \"\"\"Single topology parameter declaration. Declared parameters can", "'str'}, 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, } def __init__( self,", "iot_hubs: The IoT Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub]", "the pipeline topology. :type name: str :param value: Parameter value", "this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption", "on the service. :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param video_publishing_options: Options to", "class PrivateLinkResourceListResult(msrest.serialization.Model): \"\"\"A list of private link resources. :param value:", "used. :type trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options: Validation options to use", "str :param resource: Resource on which the operation is performed.", "of VideoAnalyzer items. :param value: A collection of VideoAnalyzer items.", "the length of individual content files (segments) which are persisted", "'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def", "256 characters long. :type title: str :param description: Optional description", "following to the base URL: .. code-block:: - HLSv4: /manifest(format=m3u8-aapl).m3u8", "key id present on the JWT token header. :type kid:", "to the video content URLs.\". Variables are only populated by", ":param medium: Medium resolution preview image URL. :type medium: str", "grain types. 
:vartype supported_time_grain_types: list[str] \"\"\" _validation = { 'name':", "will process content according to the pipeline topology definition. :type", "pipelines. All required parameters must be populated in order to", "name: str :param transport: Network transport utilized by the RTSP", "super(SystemData, self).__init__(**kwargs) self.created_by = kwargs.get('created_by', None) self.created_by_type = kwargs.get('created_by_type', None)", ":type time_sequences: ~video_analyzer.models.TimeSequenceBase \"\"\" _validation = { 'type': {'required': True},", "maximum age of the video archive segments which are intended", "error code. :vartype code: str :ivar message: The error message.", "declared here can be referenced throughout the topology nodes through", "try and match the quality of the input video. :type", "service metric specifications. Variables are only populated by the server,", "'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'},", "__init__( self, **kwargs ): super(VideoCreationProperties, self).__init__(**kwargs) self.title = kwargs.get('title', None)", "directly. Known sub-classes are: EccTokenKey, RsaTokenKey. All required parameters must", "kwargs['type'] self.user_assigned_identities = kwargs.get('user_assigned_identities', None) class VideoAnalyzerOperationStatus(msrest.serialization.Model): \"\"\"Status of video", "types.Constant filled by server. :type type: str :param credentials: Required.", "'display_name': {'key': 'displayName', 'type': 'str'}, 'display_description': {'key': 'displayDescription', 'type': 'str'},", "= { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'display_description': {'readonly':", "The error code. :type code: str :param message: The error", "'CertificateSource'}, 'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'}, } def __init__( self,", "value is 30 seconds. This property is only allowed for", "for credential objects. 
You probably want to use the sub-classes", "{ 'type': {'key': '@type', 'type': 'str'}, 'ranges': {'key': 'ranges', 'type':", "to the token expiration date. :type expiration_date: ~datetime.datetime \"\"\" _validation", "kwargs.get('description', None) self.default = kwargs.get('default', None) class ParameterDefinition(msrest.serialization.Model): \"\"\"Defines the", "= '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str self.audio_encoder = kwargs.get('audio_encoder', None) self.video_encoder", "**kwargs ): super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkServiceConnectionState(msrest.serialization.Model):", "sink nodes. Sink nodes allow pipeline data to be stored", "for license information. # Code generated by Microsoft (R) AutoRest", "= kwargs.get('sources', None) self.processors = kwargs.get('processors', None) self.sinks = kwargs.get('sinks',", "your account. :vartype expiration: ~datetime.datetime :ivar error: Details about the", "video type is 'archive' and video archiving is enabled. :type", "include: \"String\", \"SecretString\", \"Int\", \"Double\", \"Bool\". :type type: str or", "The service provider. :type provider: str :param resource: Resource on", "consumer and provider. :param status: Indicates whether the connection has", ":param user_assigned_identity: Required. The user assigned managed identity's resource identifier", "properties format. :type properties: ~video_analyzer.models.Properties :param is_data_action: Whether the operation", "self.processors = kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class PrivateEndpoint(msrest.serialization.Model):", "to \"live\". 
:type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions \"\"\" _validation = { 'type':", "def __init__( self, **kwargs ): super(VideoContentUrls, self).__init__(**kwargs) self.download_url = kwargs.get('download_url',", "definition for a Azure Resource Manager proxy resource. It will", "None self.display_name = None self.blob_duration = None class MetricDimension(msrest.serialization.Model): \"\"\"A", "'str'}, 'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'}, 'error': {'key': 'properties.error', 'type':", "private endpoint connection associated with the specified storage account. :param", "tunnel: ~video_analyzer.models.TunnelBase \"\"\" _validation = { 'type': {'required': True}, 'credentials':", "to shoebox. :vartype to_be_exported_for_shoebox: bool \"\"\" _validation = { 'name':", "and can optionally have default values to be used when", "The SKU tier. Possible values include: \"Standard\". :vartype tier: str", "str self.video_name = kwargs['video_name'] self.video_creation_properties = kwargs.get('video_creation_properties', None) self.video_publishing_options =", "\"PreserveAspectRatio\", \"Stretch\". :type mode: str or ~video_analyzer.models.VideoScaleMode \"\"\" _attribute_map =", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Required.", "causes the certificate subject name validation to be skipped. Default", "policies help define the authentication rules, and control access to", "values include: \"Average\", \"Count\", \"Total\". :vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType", "Analyzer edge module. :vartype edge_module_id: str \"\"\" _validation = {", "or override parameters values for parameters which have been declared", "of resource creation (UTC). 
:type created_at: ~datetime.datetime :param last_modified_by: The", "# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved.", "30 days will be periodically deleted. This value can be", "type videos can be streamed. :type can_stream: bool :param has_data:", "True}, 'state': {'readonly': True}, 'expiration': {'readonly': True}, 'error': {'readonly': True},", "): super(PipelineJob, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description',", "the video content authorization token to download the most recent", "'e', 'type': 'str'}, } def __init__( self, **kwargs ): super(RsaTokenKey,", "Value indicating whether or not the video can be streamed.", "sinks which allow for data to be stored or exported", "ignored when sending a request. :ivar type: The additional info", "'str'}, 'id': {'key': 'id', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type':", "disableArchive is set to true, then no content is archived.", "'@type', 'type': 'str'}, 'certificates': {'key': 'certificates', 'type': '[str]'}, } def", "content authorization token to download the most recent still image", "None) self.end_time = kwargs.get('end_time', None) self.status = kwargs.get('status', None) self.error", "in \"live mode\" with latencies which are approximately double of", "None self.supported_time_grain_types = None class NetworkAccessControl(msrest.serialization.Model): \"\"\"Network access control for", "super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type =", "not public network access is allowed for specified resources under", "= kwargs.get('identity', None) self.status = None class SystemData(msrest.serialization.Model): \"\"\"Metadata pertaining", "start time. 
:type start_time: str :param end_time: Operation end time.", "def __init__( self, **kwargs ): super(AccessPolicyEntity, self).__init__(**kwargs) self.role = kwargs.get('role',", "\"\"\"Describes a custom preset for encoding the input content using", "This is used, for example, when the topology is used", "to send to Azure. :param key_identifier: Required. The URL of", "used in case a new video resource needs to be", "list[~video_analyzer.models.ParameterDefinition] \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':", "different resolutions. They are available when the video type is", "NodeInput(msrest.serialization.Model): \"\"\"Describes an input signal to be used on a", "**kwargs ): super(Endpoint, self).__init__(**kwargs) self.endpoint_url = kwargs.get('endpoint_url', None) self.type =", "list[~video_analyzer.models.PrivateLinkResource] \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[PrivateLinkResource]'},", "API access. :type authentication: ~video_analyzer.models.AuthenticationBase \"\"\" _validation = { 'id':", "error: ~video_analyzer.models.PipelineJobError :param parameters: List of the instance level parameter", "'{str}'}, 'location': {'key': 'location', 'type': 'str'}, } def __init__( self,", "'aggregationType', 'type': 'str'}, 'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key':", "'str'}, 'y': {'key': 'y', 'type': 'str'}, } def __init__( self,", "and video file is available for consumption. :type download_url: str", "= None # type: Optional[str] self.name = kwargs['name'] class ProcessorNodeBase(NodeBase):", "'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, } def __init__( self,", "a live topology, used for real-time ingestion, archiving and publishing", "self.title = kwargs.get('title', None) self.description = kwargs.get('description', None) self.type_properties_type =", "credentials: Required. Credentials to be presented to the endpoint. 
:type", ":param audio_encoder: Describes a custom preset for encoding audio. :type", "self.message = kwargs.get('message', None) class PipelineJobOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the", "{'key': 'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'}, 'processors':", "recording may be gated on events or camera may not", "= kwargs.get('next_link', None) class PipelineTopologyUpdate(ProxyResource): \"\"\"Pipeline topology describes the processing", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} } def __init__( self,", "'type': 'SystemData'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location',", "state. All required parameters must be populated in order to", "when the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase", "__init__( self, **kwargs ): super(OperationCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "a built-in preset for encoding the input content using the", "values include: \"Failed\", \"InProgress\", \"Succeeded\". :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState", "name. Must be unique within the topology. :type name: str", "be periodically deleted. This value can be updated at any", "sku: ~video_analyzer.models.Sku :param description: An optional description of the pipeline", "modified the resource. Possible values include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\".", "**kwargs ): super(UserAssignedManagedIdentity, self).__init__(**kwargs) self.client_id = None self.principal_id = None", "auto-rotated as long as the module is able to periodically", "class LivePipelineCollection(msrest.serialization.Model): \"\"\"A collection of LivePipeline items. 
:param value: A", "super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str self.iot_hub_name =", "Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase): \"\"\"A remote tunnel securely established using IoT", "populated in order to send to Azure. :param key_identifier: Required.", "class LivePipelineOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status of an operation", "kwargs.get('supported_aggregation_types', None) self.dimensions = None self.enable_regional_mdm_account = None self.source_mdm_account =", "None) self.description = kwargs.get('description', None) self.segment_length = kwargs.get('segment_length', None) self.retention_period", "self, **kwargs ): super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description", "ingested by the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List", "__init__( self, **kwargs ): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available = kwargs.get('name_available', None)", "SystemData(msrest.serialization.Model): \"\"\"Metadata pertaining to creation and last modification of the", "of the upstream node in the pipeline which output is", "identity that last modified the resource. Possible values include: \"User\",", "Username to be presented as part of the credentials. :type", "'[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__( self,", "operation on the live pipeline. Variables are only populated by", "certificate per entry. 
:type certificates: list[str] \"\"\" _validation = {", "} def __init__( self, **kwargs ): super(AccessPolicyEntity, self).__init__(**kwargs) self.role =", "\"\"\"Used for tracking the status of an operation on the", "True}, 'error': {'readonly': True}, } _attribute_map = { 'id': {'key':", "'type': {'key': 'type', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'},", "topology is used only for archiving content. Default is 'false'.", ":ivar lock_aggregation_type: The metric lock aggregation type. Possible values include:", "'source_mdm_account': {'readonly': True}, 'source_mdm_namespace': {'readonly': True}, 'supported_time_grain_types': {'readonly': True}, }", "be ingested into a pipeline. Currently supported only with batch", "in the referenced topology. Topology parameters without a default value", "rate of 48 kHz). Allowed values are 96, 112, 128,", "state of the Video Analyzer account. Possible values include: \"Failed\",", "kwargs.get('last_modified_by_type', None) self.last_modified_at = kwargs.get('last_modified_at', None) class TimeSequenceBase(msrest.serialization.Model): \"\"\"A sequence", "'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'}, 'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},", "The provisioning token itself is short lived and it is", "'[str]'}, 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type':", "{ 'name': {'required': True}, 'type': {'required': True}, } _attribute_map =", "'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'}, } def __init__( self, **kwargs", ":param network_access_control: Network access control for Video Analyzer. :type network_access_control:", "link resources. :type value: list[~video_analyzer.models.PrivateLinkResource] \"\"\" _attribute_map = { 'value':", ":param sku: Describes the properties of a SKU. 
:type sku:", "which Video Analyzer can connect to the endpoint URL. This", "service_specification: ~video_analyzer.models.ServiceSpecification \"\"\" _validation = { 'service_specification': {'readonly': True}, }", "URL. The archived content can be automatically played by the", "'name': {'required': True}, 'inputs': {'required': True}, } _attribute_map = {", "properties for tokens generated with RSA algorithm. All required parameters", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'iot_hub_name':", "such as individual cameras' RTSP endpoints and credentials. Overall a", "message: The error message. :type message: str \"\"\" _attribute_map =", "self, **kwargs ): super(CertificateSource, self).__init__(**kwargs) self.type = None # type:", "which are approximately double of the chosen video segment length.", "str :param ranges: Required. The sequence of datetime ranges. Example:", "__init__( self, **kwargs ): super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "'str'}, 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'}, } def __init__( self,", "alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo :param n: Required. RSA public key", "str :param large: High resolution preview image URL. :type large:", "'bool'}, 'reason': {'key': 'reason', 'type': 'str'}, 'message': {'key': 'message', 'type':", "media_info: ~video_analyzer.models.VideoMediaInfo :param archival: Video archival properties. :type archival: ~video_analyzer.models.VideoArchival", "'str'}, 'mode': {'key': 'mode', 'type': 'str'}, } def __init__( self,", "{'key': 'endpointUrl', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, }", ":type code: str :param message: The error message. 
:type message:", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase':", "self, **kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type = None self.info =", "self.device_id = kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model): \"\"\"The service metric specifications. Variables", "Required. RSA public key modulus. :type n: str :param e:", "storage account resource. Video Analyzer relies on tables, queues, and", "category display name. :vartype display_name: str :ivar blob_duration: The time", "required_members: The private link resource required member names. :vartype required_members:", "'video_name': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id", "least one of the given values. :type issuers: list[str] :param", "message. :type message: str \"\"\" _attribute_map = { 'code': {'key':", "{'key': 'segmentLength', 'type': 'str'}, 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, }", "'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'default': {'key': 'default',", "instance of a batch topology, used for offline processing of", "and match the quality of the input video. :type bitrate_kbps:", "key may either be versioned (for example https://vault/keys/mykey/version1) or reference", "content according to the pipeline topology definition. 
:type topology_name: str", ":param tunnel: Describes the tunnel through which Video Analyzer can", "): super(OperationDisplay, self).__init__(**kwargs) self.provider = kwargs.get('provider', None) self.resource = kwargs.get('resource',", "{ 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type':", "None) self.endpoint = kwargs['endpoint'] class TunnelBase(msrest.serialization.Model): \"\"\"Base class for tunnel", "= { 'id': {'required': True}, 'identity': {'required': True}, 'status': {'readonly':", "'service_specification': {'readonly': True}, } _attribute_map = { 'service_specification': {'key': 'serviceSpecification',", "None) class LivePipelineCollection(msrest.serialization.Model): \"\"\"A collection of LivePipeline items. :param value:", "identity that last modified the resource. :type last_modified_by: str :param", "n: str :param e: Required. RSA public key exponent. :type", "'@type', 'type': 'str'}, 'iot_hub_name': {'key': 'iotHubName', 'type': 'str'}, 'device_id': {'key':", "particular outcome. The topology should be defined according to the", "PipelineJob(ProxyResource): \"\"\"Pipeline job represents a unique instance of a batch", "validation is used. :type validation_options: ~video_analyzer.models.TlsValidationOptions \"\"\" _validation = {", "'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str self.ranges = kwargs['ranges'] class VideoSink(SinkNodeBase): \"\"\"Video", "\"\"\" _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type':", "the video is currently being referenced be an active pipeline.", "= None self.details = None self.additional_info = None class ErrorResponse(msrest.serialization.Model):", "status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The reason for approval/rejection", "expected token issuers. Token issuer is valid if it matches", "owner of the service. 
Possible values include: \"Pending\", \"Approved\", \"Rejected\".", "that the pipeline can connect to over clear transport (no", "= kwargs.get('code', None) self.message = kwargs.get('message', None) class PipelineJobOperationStatus(msrest.serialization.Model): \"\"\"Used", "\"Internal\". :type action_type: str or ~video_analyzer.models.ActionType \"\"\" _validation = {", "period indicates the maximum age of the video archive segments", "'str'}, 'end_time': {'key': 'endTime', 'type': 'str'}, 'status': {'key': 'status', 'type':", "'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def __init__(", "seconds to 5 minutes, in 30 seconds increments. :type segment_length:", "_attribute_map = { 'height': {'key': 'height', 'type': 'str'}, 'width': {'key':", "'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'},", "{'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state':", "resource. :type user_assigned_identity: str \"\"\" _validation = { 'user_assigned_identity': {'required':", "self.id = kwargs.get('id', None) self.start_time = kwargs.get('start_time', None) self.end_time =", "**kwargs ): super(VideoScale, self).__init__(**kwargs) self.height = kwargs.get('height', None) self.width =", ":type description: str :param default: The default value for the", "the pipeline topology. 
It is recommended that the expected use", "'systemData', 'type': 'SystemData'}, 'title': {'key': 'properties.title', 'type': 'str'}, 'description': {'key':", "\"\"\" _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message':", "__init__( self, **kwargs ): super(PrivateEndpoint, self).__init__(**kwargs) self.id = None class", "(30 days), content older than 30 days will be periodically", "be populated in order to send to Azure. :param user_assigned_identity:", "to 'true', then \"disableRtspPublishing\" must be set to 'false'. :type", "{ 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'display_description': {'readonly': True},", "frames per second) of the encoded video. The value must", "or ~video_analyzer.models.LivePipelineState :param parameters: List of the instance level parameter", "None) class TokenClaim(msrest.serialization.Model): \"\"\"Properties for expected token claims. All required", "Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) class AudioEncoderAac(AudioEncoderBase): \"\"\"A custom preset", "the live pipeline operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation =", "kwargs.get('ignore_signature', None) class TokenClaim(msrest.serialization.Model): \"\"\"Properties for expected token claims. All", "'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'description': {'key': 'description',", "pipeline topology parameters for more information. All required parameters must", "The metric display name. :vartype display_name: str :ivar display_description: The", "= kwargs['name'] self.value = kwargs.get('value', None) class PemCertificateList(CertificateSource): \"\"\"A list", "to the Internet prior to the token expiration date. 
:vartype", "kwargs.get('public_network_access', None) self.network_access_control = kwargs.get('network_access_control', None) self.provisioning_state = None self.private_endpoint_connections", ":type transport: str or ~video_analyzer.models.RtspTransport :param endpoint: Required. RTSP endpoint", "job. Videos ingested through live pipelines can be streamed through", "str :param description: Optional description provided by the user. Value", "define or override parameters values for parameters which have been", "which have been declared in the referenced topology. Topology parameters", "'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access': {'key':", "collection of VideoEntity items. :type value: list[~video_analyzer.models.VideoEntity] :param next_link: A", "\"\"\"The service metric specifications. Variables are only populated by the", "its dynamic properties based on the current video state. All", "= kwargs['x'] self.y = kwargs['y'] class EdgeModuleEntity(ProxyResource): \"\"\"The representation of", "self.expiration_date = None self.token = None class VideoContentUrls(msrest.serialization.Model): \"\"\"Set of", "the storage account resource. Video Analyzer relies on tables, queues,", ":param name: Required. Name of the built-in encoding preset. Possible", "be ingested from cameras. 
* Processors: list of nodes which", "~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name': {'required': True}, } _attribute_map", "'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'} } def __init__( self, **kwargs", "{'key': 'description', 'type': 'str'}, 'default': {'key': 'default', 'type': 'str'}, }", "{'key': 'isInUse', 'type': 'bool'}, } def __init__( self, **kwargs ):", "self.endpoint_url = kwargs.get('endpoint_url', None) self.type = kwargs['type'] class EndpointBase(msrest.serialization.Model): \"\"\"Base", "list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},", "VideoAnalyzerOperationStatus(msrest.serialization.Model): \"\"\"Status of video analyzer operation. All required parameters must", "sub-classes are: AudioEncoderAac. All required parameters must be populated in", "populated in order to send to Azure. :param can_stream: Required.", "= { 'key_identifier': {'key': 'keyIdentifier', 'type': 'str'}, 'current_key_identifier': {'key': 'currentKeyIdentifier',", "str :param properties: Operation properties format. :type properties: ~video_analyzer.models.Properties :param", "used for offline processing of selected portions of archived content.", "processors: List of the topology processor nodes. Processor nodes enable", "'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, 'identity': {'key': 'identity',", "played on any standard media player. It is available when", "'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key':", "source MDM namespace. :vartype source_mdm_namespace: str :ivar supported_time_grain_types: The supported", "to Azure. :param type: Required. The discriminator for derived types.Constant", "by server. :type type: str :param name: Required. 
Name of", "None class PrivateEndpointConnection(Resource): \"\"\"The Private Endpoint Connection resource. Variables are", ":param y: Required. Y coordinate. :type y: str \"\"\" _validation", "'large', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoPreviewImageUrls,", "keys which will be auto-rotated as long as the module", "'str'}, 'properties': {'key': 'properties', 'type': 'Properties'}, 'is_data_action': {'key': 'isDataAction', 'type':", "): super(TlsEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates", "VideoPreviewImageUrls(msrest.serialization.Model): \"\"\"Video preview image URLs. These URLs can be used", "allowed for topologies where \"kind\" is set to \"live\". :param", "{'key': 'title', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'segment_length':", "of type 'file'. All required parameters must be populated in", "self, **kwargs ): super(VideoArchival, self).__init__(**kwargs) self.retention_period = kwargs.get('retention_period', None) class", "'type': '{UserAssignedManagedIdentity}'}, } def __init__( self, **kwargs ): super(VideoAnalyzerIdentity, self).__init__(**kwargs)", "Optional[str] class TokenKey(msrest.serialization.Model): \"\"\"Key properties for JWT token validation. You", "'medium': {'key': 'medium', 'type': 'str'}, 'large': {'key': 'large', 'type': 'str'},", "'kid': {'required': True}, 'alg': {'required': True}, 'x': {'required': True}, 'y':", "self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str self.inputs = kwargs['inputs'] class", "'endpointUrl', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def", "True}, 'info': {'readonly': True}, } _attribute_map = { 'type': {'key':", "connection operation. All required parameters must be populated in order", "of video analyzer operation. All required parameters must be populated", "The error details for the live pipeline operation. 
:vartype error:", "change how video is published. These are only allowed for", "{'key': 'displayName', 'type': 'str'}, 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'}, }", "The name of the live pipeline operation. :vartype name: str", "self.keys = kwargs.get('keys', None) class KeyVaultProperties(msrest.serialization.Model): \"\"\"The details for accessing", "'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'video_name': {'key':", "300. If omitted, the encoder uses the average frame rate", "'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'to_be_exported_for_shoebox': {'key':", "an existing pipeline topology. When activated, this pipeline job will", "token: The token blob to be provided to the Azure", "used to encrypt the Account Key. Possible values include: \"SystemKey\",", "self.next_link = kwargs.get('next_link', None) class VideoFlags(msrest.serialization.Model): \"\"\"Video flags contain information", ":vartype message: str :ivar target: The error target. :vartype target:", "None) self.authentication = kwargs.get('authentication', None) class AccessPolicyEntityCollection(msrest.serialization.Model): \"\"\"A collection of", "\"Batch\". :type kind: str or ~video_analyzer.models.Kind :param sku: Describes the", "str :param username: Required. Username to be presented as part", "pipeline job. Videos ingested through live pipelines can be streamed", "to be returned as part of the resource on API", "description: str :param parameters: List of the topology parameter declarations.", "= None self.encryption = kwargs.get('encryption', None) self.iot_hubs = kwargs.get('iot_hubs', None)", "\"\"\"A private link resource. Variables are only populated by the", "): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname', None) self.ignore_signature = kwargs.get('ignore_signature',", "Array of private endpoint connections. 
:type value: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _attribute_map", "sinks: list[~video_analyzer.models.SinkNodeBase] \"\"\" _validation = { 'id': {'readonly': True}, 'name':", "is_data_action: Whether the operation applies to data-plane. :type is_data_action: bool", "aggregation_type: str or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The metric lock aggregation", "'[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(PipelineJobUpdate, self).__init__(**kwargs) self.topology_name", "self).__init__(**kwargs) self.kind = kwargs['kind'] self.sku = kwargs['sku'] self.description = kwargs.get('description',", "name of the resource for which availability needs to be", "parameters which have been declared in the referenced topology. Topology", "formatted public certificates. One certificate per entry. :type certificates: list[str]", "'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, 'enable_regional_mdm_account': {'key':", "of the encoded video. The value must be greater than", "id to use when establishing the remote tunnel. This string", "ingestion, archiving and publishing of content for a unique RTSP", "ISO8601 format (eg. 2021-01-01T00:00:00Z). :vartype expiration_date: ~datetime.datetime :ivar token: The", ":ivar source_mdm_account: The source MDM account. :vartype source_mdm_account: str :ivar", "def __init__( self, **kwargs ): super(ParameterDefinition, self).__init__(**kwargs) self.name = kwargs['name']", "'alg': {'required': True}, 'x': {'required': True}, 'y': {'required': True}, }", "'#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str self.name = kwargs['name'] class Endpoint(msrest.serialization.Model): \"\"\"The", "on the pipeline job. 
Variables are only populated by the", "self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str class VideoEntity(ProxyResource): \"\"\"Represents a", "'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__( self, **kwargs ): super(ErrorDetail,", "and the cloud. After the initial handshake, the IoT edge", "must be defined. Topology parameters with a default value can", "'str'}, 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, 'last_modified_by': {'key': 'lastModifiedBy', 'type':", "for nodes. You probably want to use the sub-classes and", "'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'}, } def __init__( self, **kwargs", "retention_period: str \"\"\" _attribute_map = { 'title': {'key': 'title', 'type':", "public_network_access: Whether or not public network access is allowed for", "'#Microsoft.VideoAnalyzer.VideoSink' # type: str self.video_name = kwargs['video_name'] self.video_creation_properties = kwargs.get('video_creation_properties',", "* Parameters: list of user defined parameters that can be", "topology source nodes. You probably want to use the sub-classes", "class VideoCreationProperties(msrest.serialization.Model): \"\"\"Optional properties to be used in case a", "AutoRest Code Generator. # Changes may cause incorrect behavior and", "'bool'}, } def __init__( self, **kwargs ): super(MetricDimension, self).__init__(**kwargs) self.name", "resource creation (UTC). :type created_at: ~datetime.datetime :param last_modified_by: The identity", "video content authorization token to expose a WebSocket tunneled RTSP", "include: \"Internal\". :type action_type: str or ~video_analyzer.models.ActionType \"\"\" _validation =", "'noisy neighbor' does not affect other live pipelines in your", "the claim which must be present on the token. :type", "doesn't necessarily indicate that data is being received. 
For example,", "'#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} } def __init__( self, **kwargs ): super(EncoderPresetBase, self).__init__(**kwargs)", "'value', 'type': '[PrivateLinkResource]'}, } def __init__( self, **kwargs ): super(PrivateLinkResourceListResult,", "super(EncoderPresetBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class EncoderCustomPreset(EncoderPresetBase):", "~video_analyzer.models.EncoderPresetBase \"\"\" _validation = { 'type': {'required': True}, 'name': {'required':", "{'key': 'name', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.RtspSource':", "origin: str :param properties: Operation properties format. :type properties: ~video_analyzer.models.Properties", "Azure. :param name: Required. Operation identifier. :type name: str :param", "} def __init__( self, **kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name =", "= None self.message = None self.target = None self.details =", "include: \"Average\", \"Count\", \"Total\". :vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType :param", "'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(PipelineJobOperationStatus,", "The SKU name. Possible values include: \"Live_S1\", \"Batch_S1\". :type name:", "= { 'type': {'key': '@type', 'type': 'str'}, 'username': {'key': 'username',", "be provided to the Azure Video Analyzer IoT edge module", "'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},", "\"\"\" _validation = { 'expiration_date': {'required': True}, } _attribute_map =", ":param description: Description of the parameter. :type description: str :param", ":vartype system_data: ~video_analyzer.models.SystemData :param title: Optional video title provided by", "by service. 
Variables are only populated by the server, and", "# type: str self.inputs = kwargs['inputs'] class Sku(msrest.serialization.Model): \"\"\"The SKU", "least one of the given values. :type audiences: list[str] :param", "directly. Known sub-classes are: AudioEncoderAac. All required parameters must be", "): super(EncoderSystemPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str self.name", ":param value: Required. Expected value of the claim to be", "EccTokenKey, RsaTokenKey. All required parameters must be populated in order", "video content. :param download_url: Video file download URL. This URL", "data is being received. For example, video recording may be", "= kwargs.get('error', None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): \"\"\"Status of private endpoint connection", ":type endpoint_url: str :param type: Required. The type of the", ":type small: str :param medium: Medium resolution preview image URL.", "of the following: * Parameters: list of user defined parameters", "{'key': 'nameAvailable', 'type': 'bool'}, 'reason': {'key': 'reason', 'type': 'str'}, 'message':", "str :param type: Required. The type of the endpoint. Possible", "'[TokenKey]'}, } def __init__( self, **kwargs ): super(JwtAuthentication, self).__init__(**kwargs) self.type", "self.flags = None self.content_urls = None self.media_info = kwargs.get('media_info', None)", "None self.error = None self.parameters = kwargs.get('parameters', None) class PipelineJobCollection(msrest.serialization.Model):", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar edge_module_id: Internal ID", "None) self.next_link = kwargs.get('next_link', None) class EdgeModuleProvisioningToken(msrest.serialization.Model): \"\"\"Provisioning token properties.", "and can vary between 30 seconds to 5 minutes, in", "Vault identity. 
:type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status", "device information. All required parameters must be populated in order", "def __init__( self, **kwargs ): super(AuthenticationBase, self).__init__(**kwargs) self.type = None", "Required. Username to be presented as part of the credentials.", "_attribute_map = { 'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'}, 'ingestion': {'key':", "VideoMediaInfo(msrest.serialization.Model): \"\"\"Contains information about the video and audio content. :param", "kwargs.get('value', None) class PrivateLinkServiceConnectionState(msrest.serialization.Model): \"\"\"A collection of information about the", "\"\"\"The details for accessing the encryption keys in Key Vault.", "'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'}, 'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'},", "'properties.edgeModuleId', 'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntity,", "coordinate. :type x: str :param y: Required. Y coordinate. :type", "'has_data': {'key': 'hasData', 'type': 'bool'}, 'is_in_use': {'key': 'isInUse', 'type': 'bool'},", "'type': {'readonly': True}, 'system_data': {'readonly': True}, 'location': {'required': True}, 'endpoints':", "itself is short lived and it is only used for", "up to 256 characters long. :type title: str :param description:", "'unit', 'type': 'str'}, 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, 'lock_aggregation_type': {'key':", "be initialized and connected to the Internet prior to the", "'type': 'bool'}, } def __init__( self, **kwargs ): super(MetricDimension, self).__init__(**kwargs)", "access. :type authentication: ~video_analyzer.models.AuthenticationBase \"\"\" _validation = { 'id': {'readonly':", "content from a Video Analyzer video resource to be ingested", "'x', 'type': 'str'}, 'y': {'key': 'y', 'type': 'str'}, } def", "job operation. 
:vartype name: str :ivar status: The status of", "'str'}, 'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'}, 'video_encoder': {'key': 'videoEncoder', 'type':", "kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class PipelineJobError(msrest.serialization.Model): \"\"\"Details about", "} _attribute_map = { 'key_identifier': {'key': 'keyIdentifier', 'type': 'str'}, 'current_key_identifier':", "for encoding video with the H.264 (AVC) codec. All required", "{ 'name': {'key': 'name', 'type': 'str'}, 'value': {'key': 'value', 'type':", "filled by server. :type type: str :param name: Required. Name", "key id. Validation keys are looked up based on the", "type. Possible values include: \"Average\", \"Count\", \"Total\". :vartype lock_aggregation_type: str", "chosen video segment length. It is available when the video", "in case the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param", "encoded video. If omitted, the encoder uses the resolution of", "'type': 'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityResponse, self).__init__(**kwargs)", "'str'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type':", "= kwargs.get('role', None) self.authentication = kwargs.get('authentication', None) class AccessPolicyEntityCollection(msrest.serialization.Model): \"\"\"A", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference", "Describes the properties of a SKU. :type sku: ~video_analyzer.models.Sku :param", "def __init__( self, **kwargs ): super(VideoFlags, self).__init__(**kwargs) self.can_stream = kwargs['can_stream']", "topology. :type name: str :param transport: Network transport utilized by", "be effective within 24 hours. :type retention_period: str \"\"\" _attribute_map", "\"\"\"Describes a built-in preset for encoding the input content using", "\"2021-10-05T03:40:00Z\"]]'. 
:type ranges: str \"\"\" _validation = { 'type': {'required':", "kwargs.get('name', None) self.type = kwargs.get('type', None) class CheckNameAvailabilityResponse(msrest.serialization.Model): \"\"\"The check", ":param error: The error detail. :type error: ~video_analyzer.models.ErrorDetail \"\"\" _validation", "\"Batch_S1\". :type name: str or ~video_analyzer.models.SkuName :ivar tier: The SKU", "The default value for the parameter to be used if", "the access level granted by this policy. Possible values include:", "of selected portions of archived content. Variables are only populated", "An optional description for the pipeline. :type description: str :param", "_attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def", "None) self.origin = kwargs.get('origin', None) self.properties = kwargs.get('properties', None) self.is_data_action", "kwargs.get('disable_rtsp_publishing', None) class VideoScale(msrest.serialization.Model): \"\"\"The video scaling information. :param height:", "The additional info. :vartype info: any \"\"\" _validation = {", "last_modified_by: The identity that last modified the resource. :type last_modified_by:", "'#Microsoft.VideoAnalyzer.RtspSource' # type: str self.transport = kwargs.get('transport', None) self.endpoint =", "has_data: bool :param is_in_use: Required. Value indicating whether or not", "self.type = kwargs.get('type', None) class CheckNameAvailabilityResponse(msrest.serialization.Model): \"\"\"The check availability result.", "{'key': 'lastModifiedBy', 'type': 'str'}, 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, 'last_modified_at':", "to_be_exported_for_shoebox: Whether to export metric to shoebox. :vartype to_be_exported_for_shoebox: bool", "'str'}, 'medium': {'key': 'medium', 'type': 'str'}, 'large': {'key': 'large', 'type':", "a firewall. 
:type tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates: List of trusted", "will be auto-rotated as long as the module is able", "kwargs.get('error', None) class GroupLevelAccessControl(msrest.serialization.Model): \"\"\"Group level network access control. :param", "~video_analyzer.models.VideoArchival \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':", "{'key': 'principalId', 'type': 'str'}, } def __init__( self, **kwargs ):", "\"\"\"A remote tunnel securely established using IoT Hub device information.", "the parameter. :type description: str :param default: The default value", "SKU name. Possible values include: \"Live_S1\", \"Batch_S1\". :type name: str", "may not be accessible at the time. :type is_in_use: bool", "'str'}, } def __init__( self, **kwargs ): super(ParameterDeclaration, self).__init__(**kwargs) self.name", "Public network access for consumption group. :type consumption: ~video_analyzer.models.GroupLevelAccessControl \"\"\"", "{'key': 'location', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts':", "the resource. E.g. \"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\". :vartype type: str :ivar", "the service. Possible values include: \"Pending\", \"Approved\", \"Rejected\". :type status:", "_attribute_map = { 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, 'ignore_signature': {'key':", "{'required': True}, 'url': {'required': True}, } _attribute_map = { 'type':", "'value', 'type': '[EdgeModuleEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def", "{ 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = {", "for more information. All required parameters must be populated in", "entry. 
:type certificates: list[str] \"\"\" _validation = { 'type': {'required':", "__init__( self, **kwargs ): super(TunnelBase, self).__init__(**kwargs) self.type = None #", "self, **kwargs ): super(VideoCreationProperties, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description", ":param claims: List of additional token claims to be validated.", "'type': 'str'}, 'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'}, } def __init__(", "input of the current node. :type node_name: str \"\"\" _validation", "RTSP camera. Variables are only populated by the server, and", "self.last_modified_at = kwargs.get('last_modified_at', None) class TimeSequenceBase(msrest.serialization.Model): \"\"\"A sequence of datetime", "must be present on the token. :type name: str :param", "'type': '[MetricSpecification]'}, } def __init__( self, **kwargs ): super(ServiceSpecification, self).__init__(**kwargs)", "str \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[LivePipeline]'},", "'@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(LivePipelineCollection,", "{'key': 'tags', 'type': '{str}'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts':", "age of the video archive segments which are intended to", "parameters. 
A pipeline can only define or override parameters values", "str or ~video_analyzer.models.PublicNetworkAccess \"\"\" _attribute_map = { 'public_network_access': {'key': 'publicNetworkAccess',", "{'key': 'small', 'type': 'str'}, 'medium': {'key': 'medium', 'type': 'str'}, 'large':", "upstream node in the pipeline which output is used as", "to generate registration token for the Azure Video Analyzer IoT", "{'key': 'properties.description', 'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'expiration':", "'type': 'PipelineJobError'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__(", "for the user-defined topology parameters. A pipeline can only define", "'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'kind': {'key': 'kind', 'type': 'str'},", "'role': {'key': 'properties.role', 'type': 'str'}, 'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'},", "state of the pipeline (read-only). Possible values include: \"Inactive\", \"Activating\",", "'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type':", "seconds increments. :type segment_length: str \"\"\" _attribute_map = { 'segment_length':", "private endpoint connections. :type value: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _attribute_map = {", "a new or existing video resource used to capture and", "_validation = { 'type': {'required': True}, 'username': {'required': True}, 'password':", "server. :type type: str :param ranges: Required. The sequence of", "self, **kwargs ): super(ParameterDefinition, self).__init__(**kwargs) self.name = kwargs['name'] self.value =", "None) self.type_properties_type = None self.flags = None self.content_urls = None", "display_name: str :ivar blob_duration: The time range for requests in", "time. 
:type is_in_use: bool \"\"\" _validation = { 'can_stream': {'required':", "Changes may cause incorrect behavior and will be lost if", "} def __init__( self, **kwargs ): super(VideoContentUrls, self).__init__(**kwargs) self.download_url =", "{ 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type':", "topology source nodes. Source nodes enable external data to be", "PipelineJob items. :param value: A collection of PipelineJob items. :type", "\"Bool\". :type type: str or ~video_analyzer.models.ParameterType :param description: Description of", "of metric specifications. :vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] \"\"\" _validation = {", "display_description: The metric display description. :vartype display_description: str :ivar unit:", "'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type':", "result. :param name_available: Indicates if the resource name is available.", "transport: str or ~video_analyzer.models.RtspTransport :param endpoint: Required. RTSP endpoint information", "server. :type type: str \"\"\" _validation = { 'type': {'required':", "): super(SourceNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str class", "192, 224, and 256. If omitted, the bitrate of the", "name is not available. Possible values include: \"Invalid\", \"AlreadyExists\". :type", "'str'}, } def __init__( self, **kwargs ): super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs) self.type", "\"\"\"A collection of VideoAnalyzer items. :param value: A collection of", "URLs to the video content. :param download_url: Video file download", "{'key': 'logSpecifications', 'type': '[LogSpecification]'}, 'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, }", "to 300. 
If omitted, the encoder uses the average frame", "= { 'id': {'readonly': True}, } _attribute_map = { 'id':", "'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(PipelineJob, self).__init__(**kwargs)", "for different applications and scenarios. Possible values include: \"Archive\", \"File\".", "def __init__( self, **kwargs ): super(MetricDimension, self).__init__(**kwargs) self.name = None", "kwargs.get('description', None) class ParameterDeclaration(msrest.serialization.Model): \"\"\"Single topology parameter declaration. Declared parameters", "multiple keys allow for seamless key rotation of the token", "\"Count\", \"Milliseconds\". :vartype unit: str or ~video_analyzer.models.MetricUnit :ivar aggregation_type: The", "Reference to an existing pipeline topology. When activated, this pipeline", "y: Required. Y coordinate. :type y: str \"\"\" _validation =", "twin properties. :vartype token: str \"\"\" _validation = { 'expiration_date':", "Hub. :type iot_hub_name: str :param device_id: Required. The IoT device", "access policies authentication methods. You probably want to use the", "'type': '[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control': {'key': 'properties.networkAccessControl',", "identifier. :type name: str :param id: Operation resource ID. :type", "'type': 'SystemData'}, 'title': {'key': 'properties.title', 'type': 'str'}, 'description': {'key': 'properties.description',", "cameras. * Processors: list of nodes which perform data analysis", "'type': 'SystemData'}, 'kind': {'key': 'kind', 'type': 'str'}, 'sku': {'key': 'sku',", "to the video content. :vartype content_urls: ~video_analyzer.models.VideoContentUrls :param media_info: Contains", "= kwargs.get('frame_rate', None) self.scale = kwargs.get('scale', None) class VideoEncoderH264(VideoEncoderBase): \"\"\"A", "offline processing of selected portions of archived content. 
Variables are", "video archive in different resolutions. They are available when the", ":type scale: ~video_analyzer.models.VideoScale \"\"\" _validation = { 'type': {'required': True},", "{'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, } _attribute_map", "width or height need be provided. Possible values include: \"Pad\",", "'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'},", "or ~video_analyzer.models.ParameterType :param description: Description of the parameter. :type description:", "video_name: Required. Name of a new or existing video resource", "images are enabled. :param small: Low resolution preview image URL.", "{'required': True}, } _attribute_map = { 'node_name': {'key': 'nodeName', 'type':", "def __init__( self, **kwargs ): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status = kwargs.get('status',", "VideoEncoderH264(VideoEncoderBase): \"\"\"A custom preset for encoding video with the H.264", "are: EncoderProcessor. All required parameters must be populated in order", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} } def", "for example, when the topology is used only for low", "APIs to return error details for failed operations. (This also", "the current video state. :vartype flags: ~video_analyzer.models.VideoFlags :ivar content_urls: Set", "authentication rules, and control access to specific video resources. Variables", "send to Azure. :param id: Required. The IoT Hub resource", "'type': 'str'}, 'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'}, 'required_zone_names': {'key': 'properties.requiredZoneNames',", "if the pipeline does not specify a value. 
:type default:", "'str'}, 'type': {'key': 'type', 'type': 'str'}, 'description': {'key': 'description', 'type':", "self.kid = kwargs['kid'] class EccTokenKey(TokenKey): \"\"\"Required validation properties for tokens", "'SystemData'}, 'group_id': {'key': 'properties.groupId', 'type': 'str'}, 'required_members': {'key': 'properties.requiredMembers', 'type':", "~video_analyzer.models.MetricUnit :ivar aggregation_type: The metric aggregation type. Possible values include:", "values include: \"Standard\". :vartype tier: str or ~video_analyzer.models.SkuTier \"\"\" _validation", "upstream node references within the topology to be used as", "class VideoFlags(msrest.serialization.Model): \"\"\"Video flags contain information about the available video", "Required. Describes the properties of a SKU. :type sku: ~video_analyzer.models.Sku", "across the topology nodes. * Sources: list of one or", "captured, optionally archived, and published via a video resource. If", "pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List of the topology", "{'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, }", "Azure. :param name: Required. Name of the claim which must", "Default mode is 'Pad'. If the mode is 'Pad' or", "for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or", "on the token. :type name: str :param value: Required. Expected", "'name', 'type': 'str'}, 'transport': {'key': 'transport', 'type': 'str'}, 'endpoint': {'key':", ":vartype type: str :ivar system_data: Azure Resource Manager metadata containing", ":type title: str :param description: Optional description provided by the", "password credentials. 
All required parameters must be populated in order", "'str'}, 'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'}, } def __init__( self,", "{ 'type': {'required': True}, } _attribute_map = { 'type': {'key':", "for a failed pipeline job. :param code: The error code.", "link resource required member names. :vartype required_members: list[str] :param required_zone_names:", "provider. :param status: Indicates whether the connection has been Approved/Rejected/Removed", "'#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str self.inputs = kwargs['inputs'] class Sku(msrest.serialization.Model): \"\"\"The", "- DASH CMAF: /manifest(format=mpd-time-cmaf) Moreover, an ongoing video recording can", "None) class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): \"\"\"A sequence of absolute datetime ranges as", "__init__( self, **kwargs ): super(VideoEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "'str'}, 'x': {'key': 'x', 'type': 'str'}, 'y': {'key': 'y', 'type':", ":type tags: dict[str, str] :param identity: The identities associated to", "{'key': 'claims', 'type': '[TokenClaim]'}, 'keys': {'key': 'keys', 'type': '[TokenKey]'}, }", "batch topology, this allows for video and audio to be", "def __init__( self, **kwargs ): super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name = None", "Analyzer to connect to. :type url: str :param tunnel: Describes", "affect other live pipelines in your account. :type bitrate_kbps: int", "geo-location where the resource lives. 
:type location: str \"\"\" _validation", "'type': 'str'}, 'ranges': {'key': 'ranges', 'type': 'str'}, } def __init__(", "'true' the RTSP playback URL will not be published, disabling", "'type': 'VideoMediaInfo'}, 'archival': {'key': 'properties.archival', 'type': 'VideoArchival'}, } def __init__(", "'name', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase',", ":type message: str \"\"\" _attribute_map = { 'name_available': {'key': 'nameAvailable',", "\"ManagedIdentity\", \"Key\". :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType :param last_modified_at: The", "self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.state", "'target': {'readonly': True}, 'details': {'readonly': True}, 'additional_info': {'readonly': True}, }", "ListProvisioningTokenInput(msrest.serialization.Model): \"\"\"The input parameters to generate registration token for the", "on the TCP RTSP connection. When using HTTP, the RTSP", "state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when", "resource: str :param operation: The operation type. 
:type operation: str", "define the recipe or instructions on how the input content", "self.medium = kwargs.get('medium', None) self.large = kwargs.get('large', None) class VideoPublishingOptions(msrest.serialization.Model):", "For instance, a pipeline topology which captures content from a", "used in conjunction with the video content authorization token on", "'id': {'required': True}, 'status': {'readonly': True}, } _attribute_map = {", "self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str self.alg = kwargs['alg'] self.x", "'VideoArchival'}, } def __init__( self, **kwargs ): super(VideoEntity, self).__init__(**kwargs) self.title", "self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description = kwargs.get('description', None) self.segment_length", "'#Microsoft.VideoAnalyzer.EccTokenKey' # type: str self.alg = kwargs['alg'] self.x = kwargs['x']", "'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(VideoAnalyzer, self).__init__(**kwargs)", "sinks: List of the topology sink nodes. Sink nodes allow", "} def __init__( self, **kwargs ): super(LivePipelineCollection, self).__init__(**kwargs) self.value =", "The allowed range is from 500 to 3000 Kbps in", "of the parameter declared in the pipeline topology. :type name:", "{ 'small': {'key': 'small', 'type': 'str'}, 'medium': {'key': 'medium', 'type':", "operations. (This also follows the OData error response format.). :param", "'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, } def", "with latencies which are approximately double of the chosen video", "None self.content_urls = None self.media_info = kwargs.get('media_info', None) self.archival =", "{ 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'blob_duration': {'readonly': True},", "title: str :param description: Optional video description provided by the", "and will be ignored when sending a request. 
:ivar name:", "): super(VideoFlags, self).__init__(**kwargs) self.can_stream = kwargs['can_stream'] self.has_data = kwargs['has_data'] self.is_in_use", ":ivar error: The error details for the pipeline job operation.", "} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'status':", "of the Video Analyzer account. Possible values include: \"Failed\", \"InProgress\",", "which has 'tags' and a 'location'. Variables are only populated", "a single video. :vartype token: str \"\"\" _validation = {", "resource management error additional info. Variables are only populated by", "Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage). :type id: str", "'str'}, 'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'retention_period': {'key': 'retentionPeriod', 'type':", "exists. :param title: Optional title provided by the user. Value", "list of nodes which perform data analysis or transformations. *", "**kwargs ): super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str", "kid: str :param alg: Required. RSA algorithm to be used:", "end_time: str :param status: Operation status. :type status: str :param", "'bool'}, 'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'}, 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type':", "list[~video_analyzer.models.VideoEntity] :param next_link: A link to the next page of", "'type': 'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, } def __init__(", "self.error = None self.parameters = kwargs.get('parameters', None) class PipelineJobCollection(msrest.serialization.Model): \"\"\"A", "the current node. :type node_name: str \"\"\" _validation = {", ":ivar service_specification: The service specifications. 
:vartype service_specification: ~video_analyzer.models.ServiceSpecification \"\"\" _validation", "kwargs.get('medium', None) self.large = kwargs.get('large', None) class VideoPublishingOptions(msrest.serialization.Model): \"\"\"Optional flags", ":ivar name: The diagnostic log category name. :vartype name: str", "endpoint information for Video Analyzer to connect to. This contains", "example, when the topology is used only for archiving content.", "in a batch topology, this allows for video and audio", ":vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types.", "None) self.claims = kwargs.get('claims', None) self.keys = kwargs.get('keys', None) class", "iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public network access", "'audiences', 'type': '[str]'}, 'claims': {'key': 'claims', 'type': '[TokenClaim]'}, 'keys': {'key':", ":param key_vault_properties: The properties of the key used to encrypt", "256. If omitted, the bitrate of the input audio is", "exporting sequences from existing captured video through a pipeline job.", "Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of", "self.can_stream = kwargs['can_stream'] self.has_data = kwargs['has_data'] self.is_in_use = kwargs['is_in_use'] class", "of the live pipeline operation. :vartype status: str :ivar error:", "information about the video and audio content. :param segment_length: Video", "'type': 'EncoderPresetBase'}, } def __init__( self, **kwargs ): super(EncoderProcessor, self).__init__(**kwargs)", "'name': {'required': True}, 'type': {'required': True}, } _attribute_map = {", ":type x: str :param y: Required. Y coordinate. :type y:", "'value', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnectionListResult,", "specific video resources. 
Variables are only populated by the server,", "a single instance of Azure Video analyzer IoT edge module", "name: The metric name. :vartype name: str :ivar display_name: The", "'type': '[SinkNodeBase]'}, } def __init__( self, **kwargs ): super(PipelineTopology, self).__init__(**kwargs)", "'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} } def __init__( self, **kwargs", "~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes a custom preset for encoding video.", "value: Array of private link resources. :type value: list[~video_analyzer.models.PrivateLinkResource] \"\"\"", "kwargs.get('transport', None) self.endpoint = kwargs['endpoint'] class TunnelBase(msrest.serialization.Model): \"\"\"Base class for", "analyzer IoT edge module to be initialized and authorized to", "the source. :type rtsp_tunnel_url: str :param preview_image_urls: Video preview image", "'type': 'VideoFlags'}, 'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'}, 'media_info': {'key': 'properties.mediaInfo',", "Hub identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status", "\"\"\"Network access control for video analyzer account. :param integration: Public", ":param retention_period: Video retention period indicates how long the video", "Possible values include: \"Average\", \"Count\", \"Total\". :vartype aggregation_type: str or", "'frameRate', 'type': 'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, } _subtype_map", "topology parameter declaration. Declared parameters can and must be referenced", "operation type. :type operation: str :param description: The operation description.", "Kbps. If the RTSP camera exceeds this capacity, then the", "authentication keys which will be auto-rotated as long as the", "(segments) which are persisted to storage. 
Smaller segments provide lower", "): super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link',", "on tables, queues, and blobs. The primary storage account must", "self.error = kwargs.get('error', None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): \"\"\"Status of private endpoint", "order to send to Azure. :param name: Required. Operation identifier.", "kwargs.get('parameters', None) class LogSpecification(msrest.serialization.Model): \"\"\"A diagnostic log emitted by service.", "connect to the endpoint URL. This is an optional property,", "~video_analyzer.models.TunnelBase :param trusted_certificates: List of trusted certificate authorities when authenticating", "be ignored when sending a request. All required parameters must", "the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List", "both width and height must be specified. Else if the", "True}, 'system_data': {'readonly': True}, 'type_properties_type': {'readonly': True}, 'flags': {'readonly': True},", "can be defined through the use of user-defined parameters, which", "super(RsaTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str self.alg =", "types.Constant filled by server. :type type: str :param iot_hub_name: Required.", "less. Currently, there can be only one range specified in", "): super(RtspSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str self.transport", "'#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} } def __init__( self, **kwargs ): super(TokenKey, self).__init__(**kwargs)", "GroupLevelAccessControl(msrest.serialization.Model): \"\"\"Group level network access control. 
:param public_network_access: Whether or", "to an existing pipeline topology defined for real-time content processing.", "\"\"\"Video flags contain information about the available video actions and", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'} } def __init__(", "will not be published, disabling low latency streaming. This is", "endpoint connection associated with the specified storage account. :param value:", "sub-classes are: UsernamePasswordCredentials. All required parameters must be populated in", "= kwargs['e'] class SourceNodeBase(NodeBase): \"\"\"Base class for topology source nodes.", "{'readonly': True}, 'display_name': {'readonly': True}, 'blob_duration': {'readonly': True}, } _attribute_map", ":vartype name: str :ivar display_name: The diagnostic log category display", "Key. Possible values include: \"SystemKey\", \"CustomerKey\". :type type: str or", ":param n: Required. RSA public key modulus. :type n: str", "status of the pipeline job operation. :vartype status: str :ivar", "'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'blob_duration': {'key': 'blobDuration', 'type':", "The content token value to be added to the video", "Known sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All required parameters must", "__init__( self, **kwargs ): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "individual content files (segments) which are persisted to storage. Smaller", "{ 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, } def __init__( self,", "to Azure. :param name: Required. Name of the parameter. :type", "neighbor' does not affect other live pipelines in your account.", "small: str :param medium: Medium resolution preview image URL. :type", "After the initial handshake, the IoT edge module will agree", "signal to be used on a pipeline node. 
All required", "Different content types are suitable for different applications and scenarios.", "optional description for the pipeline. :type description: str :param bitrate_kbps:", "'type': 'str'}, 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } def __init__(", "this class directly. Known sub-classes are: VideoEncoderH264. All required parameters", "to be created on the service. These will not take", "the input video. :type frame_rate: str :param scale: Describes the", "'type': 'GroupLevelAccessControl'}, } def __init__( self, **kwargs ): super(NetworkAccessControl, self).__init__(**kwargs)", "self, **kwargs ): super(AuthenticationBase, self).__init__(**kwargs) self.type = None # type:", "the dimension. :vartype display_name: str :ivar to_be_exported_for_shoebox: Whether to export", "pipeline job. :param code: The error code. :type code: str", "self, **kwargs ): super(ParameterDeclaration, self).__init__(**kwargs) self.name = kwargs['name'] self.type =", "account. :param integration: Public network access for integration group. :type", "# type: str self.alg = kwargs['alg'] self.n = kwargs['n'] self.e", "streamed. :type can_stream: bool :param has_data: Required. Value indicating whether", "= kwargs.get('small', None) self.medium = kwargs.get('medium', None) self.large = kwargs.get('large',", "is set to true, then no content is archived. :type", "populated in order to send to Azure. :param type: Required.", "bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live", "160, 192, 224, and 256. 
If omitted, the bitrate of", "self).__init__(**kwargs) self.type = None # type: Optional[str] self.credentials = kwargs['credentials']", "HTTP, the RTSP messages are exchanged through long lived HTTP", "be reused across many different cameras, as long as the", "optional default values and can later be defined in individual", "of the video archive segments which are intended to be", "= None class SystemData(msrest.serialization.Model): \"\"\"Metadata pertaining to creation and last", "be ignored when sending a request. :ivar expiration_date: The expiration", "_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'tier': {'key':", "specified in ISO8601 duration format (i.e. \"PT30S\" equals 30 seconds)", "'PipelineJobError'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self,", "= kwargs.get('message', None) class PipelineJobOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status", "True}, 'value': {'required': True}, } _attribute_map = { 'name': {'key':", "the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name:", "= None class VideoArchival(msrest.serialization.Model): \"\"\"Video archival properties. :param retention_period: Video", "self, **kwargs ): super(VideoPreviewImageUrls, self).__init__(**kwargs) self.small = kwargs.get('small', None) self.medium", "= kwargs.get('user_assigned_identities', None) class VideoAnalyzerOperationStatus(msrest.serialization.Model): \"\"\"Status of video analyzer operation.", "Microsoft Corporation. All rights reserved. # Licensed under the MIT", "__init__( self, **kwargs ): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id", "TunnelBase(msrest.serialization.Model): \"\"\"Base class for tunnel objects. 
You probably want to", "'type_properties_type': {'key': 'properties.type', 'type': 'str'}, 'flags': {'key': 'properties.flags', 'type': 'VideoFlags'},", "of PipelineTopology items. :param value: A collection of PipelineTopology items.", "tags and a location. Variables are only populated by the", "the input content should be processed. :type preset: ~video_analyzer.models.EncoderPresetBase \"\"\"", "identity that created the resource. Possible values include: \"User\", \"Application\",", "detail. Variables are only populated by the server, and will", "super(VideoAnalyzer, self).__init__(**kwargs) self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None)", "} def __init__( self, **kwargs ): super(SystemData, self).__init__(**kwargs) self.created_by =", "kwargs.get('archival', None) class VideoEntityCollection(msrest.serialization.Model): \"\"\"A collection of VideoEntity items. :param", "years. For example, if this is set to P30D (30", "to be used as inputs for this node. :type inputs:", "str or ~video_analyzer.models.AccessPolicyRsaAlgo :param n: Required. RSA public key modulus.", "title: str :param description: Optional description provided by the user.", "'Pad'. If the mode is 'Pad' or 'Stretch' then both", "'str'}, 'kid': {'key': 'kid', 'type': 'str'}, } _subtype_map = {", "True}, 'is_in_use': {'required': True}, } _attribute_map = { 'can_stream': {'key':", ":param resource: Resource on which the operation is performed. :type", "video and audio to be stored as a file, and", "'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources',", "of URLs to the video content. :param download_url: Video file", "def __init__( self, **kwargs ): super(PipelineJobUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name',", "Value can be up to 2048 characters long. :type description:", "\"\"\"The check availability request body. 
:param name: The name of", "= None self.parameters = kwargs.get('parameters', None) class PipelineJobCollection(msrest.serialization.Model): \"\"\"A collection", ":vartype supported_time_grain_types: list[str] \"\"\" _validation = { 'name': {'readonly': True},", ":type name: str :param video_name: Required. Name of the Video", "'type': 'str'}, 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, 'last_modified_by': {'key': 'lastModifiedBy',", "connect to over TLS transport (data is encrypted in transit).", "'keys', 'type': '[TokenKey]'}, } def __init__( self, **kwargs ): super(JwtAuthentication,", "= None self.parameters = kwargs.get('parameters', None) class LogSpecification(msrest.serialization.Model): \"\"\"A diagnostic", "topology nodes. * Sources: list of one or more data", "__init__( self, **kwargs ): super(EncoderPresetBase, self).__init__(**kwargs) self.type = None #", "additional info. :vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] \"\"\" _validation = { 'code':", "'properties.mediaInfo', 'type': 'VideoMediaInfo'}, 'archival': {'key': 'properties.archival', 'type': 'VideoArchival'}, } def", "{'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map", "link DNS zone name. :type required_zone_names: list[str] \"\"\" _validation =", "= { 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName',", "'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'x': {'key': 'x',", "self.token = None class VideoContentUrls(msrest.serialization.Model): \"\"\"Set of URLs to the", "source MDM account. :vartype source_mdm_account: str :ivar source_mdm_namespace: The source", "matches at least one of the given values. 
:type issuers:", "'frame_rate': {'key': 'frameRate', 'type': 'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'},", "are looked up based on the key id present on", "or ~video_analyzer.models.CreatedByType :param created_at: The timestamp of resource creation (UTC).", "self.parameters = kwargs.get('parameters', None) self.sources = kwargs.get('sources', None) self.processors =", "when sending a request. :param tags: A set of tags.", "to 256 characters long. :type title: str :param description: Optional", "allowed range is from 500 to 3000 Kbps in increments", "None self.private_endpoint_connections = None class VideoAnalyzerCollection(msrest.serialization.Model): \"\"\"A collection of VideoAnalyzer", "image URLs. These URLs can be used in conjunction with", "'token', 'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleProvisioningToken,", "True}, 'alg': {'required': True}, 'n': {'required': True}, 'e': {'required': True},", "the sequence. All required parameters must be populated in order", "_attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'info': {'key':", "'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineJobCollection, self).__init__(**kwargs)", "is not available. Possible values include: \"Invalid\", \"AlreadyExists\". :type reason:", "name: The metric dimension name. :vartype name: str :ivar display_name:", "'EncoderSystemPreset'} } def __init__( self, **kwargs ): super(EncoderPresetBase, self).__init__(**kwargs) self.type", "ingestion group. :type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption: Public network access", "See License.txt in the project root for license information. 
#", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[PipelineJob]'}, 'next_link':", "'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'identity': {'key':", "{'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs ):", "'type': {'required': True}, } _attribute_map = { 'type': {'key': 'type',", "{'key': 'properties.flags', 'type': 'VideoFlags'}, 'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'}, 'media_info':", "and not this class directly. Known sub-classes are: EncoderProcessor. All", "can be generated for the same IoT edge module in", "id: str :param start_time: Operation start time. :type start_time: str", "the topology. :type name: str :param video_name: Required. Name of", "of the instance level parameter values for the user-defined topology", "URLs. These URLs can be used in conjunction with the", "{ 'type': {'required': True}, 'ranges': {'required': True}, } _attribute_map =", "are: PemCertificateList. All required parameters must be populated in order", "token expiration date. :vartype expiration_date: ~datetime.datetime :ivar token: The token", "'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'segment_length': {'key': 'segmentLength',", "one of the given values. :type audiences: list[str] :param claims:", "of a live topology, used for real-time ingestion, archiving and", "Hub device information. All required parameters must be populated in", "The fact that is being referenced, doesn't necessarily indicate that", "None) self.retention_period = kwargs.get('retention_period', None) class VideoEncoderBase(msrest.serialization.Model): \"\"\"Base type for", "{'key': 'properties.title', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'type_properties_type':", "allow pipeline data to be stored or exported. :type sinks:", "topology parameter declarations. 
Parameters declared here can be referenced throughout", "self.operation = kwargs.get('operation', None) self.description = kwargs.get('description', None) class ParameterDeclaration(msrest.serialization.Model):", "{'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'kind':", "True}, 'kid': {'required': True}, } _attribute_map = { 'type': {'key':", "True}, 'identity': {'required': True}, 'status': {'readonly': True}, } _attribute_map =", "granted by this policy. Possible values include: \"Reader\". :type role:", "pipeline node. All required parameters must be populated in order", "self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id', None) self.start_time =", "} def __init__( self, **kwargs ): super(VideoEncoderBase, self).__init__(**kwargs) self.type =", "content for a unique RTSP camera. Variables are only populated", "(for example https://vault/keys/mykey/version1) or reference a key without a version", "'type': 'str'}, 'width': {'key': 'width', 'type': 'str'}, 'mode': {'key': 'mode',", "{'required': True}, 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly':", "kwargs.get('next_link', None) class AccountEncryption(msrest.serialization.Model): \"\"\"Defines how the Video Analyzer account", "Default is 'false'. :type ignore_hostname: str :param ignore_signature: When set", "{'key': 'id', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status':", "): super(ProcessorNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str self.inputs", "class VideoEncoderH264(VideoEncoderBase): \"\"\"A custom preset for encoding video with the", "many results to return in one response). :type next_link: str", "\"\"\"List of private endpoint connection associated with the specified storage", "RTP packets are interleaved on the TCP RTSP connection. 
When", "} def __init__( self, **kwargs ): super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type =", "up to 24 hours or less. Currently, there can be", "# Changes may cause incorrect behavior and will be lost", "= kwargs.get('description', None) self.actions_required = kwargs.get('actions_required', None) class Properties(msrest.serialization.Model): \"\"\"Metric", "on this specific pipeline. :type value: str \"\"\" _validation =", "\"\"\"The Video Analyzer account. Variables are only populated by the", "custom preset for encoding the input content using the encoder", "topology. :type name: str :param video_name: Required. Name of the", "containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar group_id:", "Name of the parameter declared in the pipeline topology. :type", "This allows individual pipelines refer to different values, such as", "the \"token\" query string parameter. The token is specific to", "the JWT token header. :type kid: str \"\"\" _validation =", "Possible values include: \"Live\", \"Batch\". :type kind: str or ~video_analyzer.models.Kind", "kwargs.get('download_url', None) self.archive_base_url = kwargs.get('archive_base_url', None) self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None)", "def __init__( self, **kwargs ): super(EncoderProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor'", "'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'display_description': {'key': 'displayDescription', 'type':", "video content URLs.\". Variables are only populated by the server,", "'false'. :type disable_archive: str :param disable_rtsp_publishing: When set to 'true'", "= kwargs.get('end_time', None) self.status = kwargs.get('status', None) self.error = kwargs.get('error',", "processor nodes. 
You probably want to use the sub-classes and", "'value': {'key': 'value', 'type': '[PrivateLinkResource]'}, } def __init__( self, **kwargs", "\"\"\"Video sink in a live topology allows for video and", "to data-plane. :type is_data_action: bool :param action_type: Indicates the action", "specified. Else if the mode is 'PreserveAspectRatio' then only one", "str :ivar supported_time_grain_types: The supported time grain types. :vartype supported_time_grain_types:", "the initial handshake between IoT edge module and the cloud.", "name: Required. Name of the parameter declared in the pipeline", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource':", "): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str self.iot_hub_name", "a string. You probably want to use the sub-classes and", "self.role = kwargs.get('role', None) self.authentication = kwargs.get('authentication', None) class AccessPolicyEntityCollection(msrest.serialization.Model):", "value for the parameter to be used if the pipeline", "or camera may not be accessible at the time. :type", "= None # type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.frame_rate", "): super(ParameterDeclaration, self).__init__(**kwargs) self.name = kwargs['name'] self.type = kwargs['type'] self.description", "'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type': 'str'}, } def", ":ivar display_name: The diagnostic log category display name. :vartype display_name:", "the parameter. :type name: str :param type: Required. Type of", "**kwargs ): super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str", "_attribute_map = { 'created_by': {'key': 'createdBy', 'type': 'str'}, 'created_by_type': {'key':", "the average frame rate of the input video. 
:type frame_rate:", "or ~video_analyzer.models.SkuTier \"\"\" _validation = { 'name': {'required': True}, 'tier':", "Analyzer account. Possible values include: \"Failed\", \"InProgress\", \"Succeeded\". :vartype provisioning_state:", "'inputs', 'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'}", "of PEM formatted certificates. All required parameters must be populated", "'type': 'iso-8601'}, } def __init__( self, **kwargs ): super(SystemData, self).__init__(**kwargs)", "'type': 'str'}, 'properties': {'key': 'properties', 'type': 'Properties'}, 'is_data_action': {'key': 'isDataAction',", "ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} }", "root for license information. # Code generated by Microsoft (R)", "__init__( self, **kwargs ): super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint = kwargs.get('private_endpoint', None)", "} def __init__( self, **kwargs ): super(ProcessorNodeBase, self).__init__(**kwargs) self.type =", "expiration_date: ~datetime.datetime :ivar token: The token blob to be provided", "'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'},", "Assigned Managed Identities. :type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] \"\"\" _validation =", "\"\"\"Common error response for all Azure Resource Manager APIs to", "to Azure. :param id: Required. 
The IoT Hub resource identifier.", "{'key': 'clientId', 'type': 'str'}, 'principal_id': {'key': 'principalId', 'type': 'str'}, }", "} _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, }", "'enable_regional_mdm_account': {'readonly': True}, 'source_mdm_account': {'readonly': True}, 'source_mdm_namespace': {'readonly': True}, 'supported_time_grain_types':", "the status of an operation on the pipeline job. Variables", "True}, 'inputs': {'required': True}, } _attribute_map = { 'type': {'key':", "{'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor':", "super(TrackedResource, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.location = kwargs['location'] class", "sub-classes are: PemCertificateList. All required parameters must be populated in", "Required. The SKU name. Possible values include: \"Live_S1\", \"Batch_S1\". :type", "Changing this value after the initial call to create the", "validation_options: Validation options to use when authenticating a TLS connection.", ":param video_name: Required. Name of a new or existing video", ":type segment_length: str :param retention_period: Video retention period indicates how", "ISO8601 duration format in the granularity of days, up to", "be achieved and can be reused across many pipeline instances", "EncoderSystemPreset. All required parameters must be populated in order to", "or not public network access is allowed for specified resources", "__init__( self, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = kwargs['name'] self.display", "{'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} } def __init__( self, **kwargs ): super(CertificateSource, self).__init__(**kwargs)", "group id. 
:vartype group_id: str :ivar required_members: The private link", "'@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } def", "'type', 'type': 'str'}, 'info': {'key': 'info', 'type': 'object'}, } def", "= kwargs.get('rtsp_tunnel_url', None) self.preview_image_urls = kwargs.get('preview_image_urls', None) class VideoCreationProperties(msrest.serialization.Model): \"\"\"Optional", "def __init__( self, **kwargs ): super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers'", "scaling mode to be applied. Default mode is 'Pad'. If", "kwargs.get('tunnel', None) class ErrorAdditionalInfo(msrest.serialization.Model): \"\"\"The resource management error additional info.", "= kwargs['y'] class EdgeModuleEntity(ProxyResource): \"\"\"The representation of an edge module.", "(when the collection contains too many results to return in", "start_time: Operation start time. :type start_time: str :param end_time: Operation", "super(LogSpecification, self).__init__(**kwargs) self.name = None self.display_name = None self.blob_duration =", "connection associated with the specified storage account. :param value: Array", "authorities when authenticating a TLS connection. A null list designates", "Vault key used to encrypt the account. The key may", "{'key': 'alg', 'type': 'str'}, 'x': {'key': 'x', 'type': 'str'}, 'y':", "for the instance of the Video Analyzer edge module. :vartype", "): super(AccountEncryption, self).__init__(**kwargs) self.type = kwargs['type'] self.key_vault_properties = kwargs.get('key_vault_properties', None)", "low latency streaming. This is used, for example, when the", "# type: str self.certificates = kwargs['certificates'] class PipelineJob(ProxyResource): \"\"\"Pipeline job", "type: Required. 
The type of key used to encrypt the", "Network transport utilized by the RTSP and RTP exchange: TCP", "kwargs.get('issuers', None) self.audiences = kwargs.get('audiences', None) self.claims = kwargs.get('claims', None)", "str :param y: Required. Y coordinate. :type y: str \"\"\"", "{ 'type': {'required': True}, } _attribute_map = { 'endpoint_url': {'key':", "def __init__( self, **kwargs ): super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind = kwargs.get('kind',", "'type': 'str'}, 'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'}, } def __init__(", "= { 'endpoint_url': {'key': 'endpointUrl', 'type': 'str'}, 'type': {'key': 'type',", ":ivar type: The additional info type. :vartype type: str :ivar", "\"Active\", \"Deactivating\". :vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters: List", "'[ErrorAdditionalInfo]'}, } def __init__( self, **kwargs ): super(ErrorDetail, self).__init__(**kwargs) self.code", "Video retention period indicates how long the video is kept", "def __init__( self, **kwargs ): super(VideoCreationProperties, self).__init__(**kwargs) self.title = kwargs.get('title',", "and provider. :type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The provisioning state", "\"\"\" _validation = { 'name': {'required': True}, } _attribute_map =", ":type kid: str :param alg: Required. RSA algorithm to be", "next_link: str \"\"\" _attribute_map = { 'value': {'key': 'value', 'type':", "kwargs.get('integration', None) self.ingestion = kwargs.get('ingestion', None) self.consumption = kwargs.get('consumption', None)", "'type': 'str'}, } def __init__( self, **kwargs ): super(RsaTokenKey, self).__init__(**kwargs)", "for failed operations. (This also follows the OData error response", ":vartype system_data: ~video_analyzer.models.SystemData :ivar group_id: The private link resource group", "content. 
For example, it can used to change the resolution", "\"live\". :param disable_archive: When set to 'true' content will not", "{'key': 'rtspTunnelUrl', 'type': 'str'}, 'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'}, }", "Contains information about the video and audio content. :type media_info:", "pipeline job will be automatically deleted from your account. :vartype", "properties: Operation properties format. :type properties: ~video_analyzer.models.Properties :param is_data_action: Whether", "= None class PrivateEndpointConnectionListResult(msrest.serialization.Model): \"\"\"List of private endpoint connection associated", "assigned managed identity to use when accessing a resource. All", "self).__init__(**kwargs) self.name = None self.display_name = None self.blob_duration = None", ":type disable_rtsp_publishing: str \"\"\" _attribute_map = { 'disable_archive': {'key': 'disableArchive',", "link resource. Variables are only populated by the server, and", "include: \"Bytes\", \"Count\", \"Milliseconds\". :vartype unit: str or ~video_analyzer.models.MetricUnit :ivar", "kwargs.get('iot_hubs', None) self.public_network_access = kwargs.get('public_network_access', None) self.network_access_control = kwargs.get('network_access_control', None)", ":type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls \"\"\" _attribute_map = { 'download_url': {'key': 'downloadUrl',", "{'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, 'description':", "the storage account. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current", "~video_analyzer.models.SystemData :param tags: A set of tags. Resource tags. :type", "Alternatively, this URL can be used in conjunction with the", "for Video Analyzer to connect to RTSP cameras and/or generic", "'value', 'type': '[VideoEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def", "be checked. 
:type name: str :param type: The resource type.", "str :ivar status: The status of the live pipeline operation.", "'system_data': {'readonly': True}, 'type_properties_type': {'readonly': True}, 'flags': {'readonly': True}, 'content_urls':", "= None class PrivateEndpointConnection(Resource): \"\"\"The Private Endpoint Connection resource. Variables", "for real-time ingestion, archiving and publishing of content for a", "or \"Microsoft.Storage/storageAccounts\". :vartype type: str :ivar system_data: Azure Resource Manager", "Internet prior to the token expiration date. :vartype expiration_date: ~datetime.datetime", "type. :type type: str :param user_assigned_identities: The User Assigned Managed", "\"SingleLayer_1080p_H264_AAC\", \"SingleLayer_2160p_H264_AAC\". :type name: str or ~video_analyzer.models.EncoderSystemPresetType \"\"\" _validation =", "within the topology. :type name: str :param inputs: Required. An", "'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'}, } def __init__( self, **kwargs", "'Properties'}, 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, 'action_type': {'key': 'actionType', 'type':", "= kwargs.get('type', None) class CheckNameAvailabilityResponse(msrest.serialization.Model): \"\"\"The check availability result. :param", "data to be analyzed, processed or transformed. :type processors: list[~video_analyzer.models.ProcessorNodeBase]", "1 day to 10 years, in 1 day increments. When", "references within the topology to be used as inputs for", "of the input video. 
:type bitrate_kbps: str :param frame_rate: The", "{ 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, 'token': {'key': 'token', 'type':", "sum of the ranges should add up to 24 hours", "True}, 'kid': {'required': True}, 'alg': {'required': True}, 'x': {'required': True},", "'type': 'SystemData'}, 'role': {'key': 'properties.role', 'type': 'str'}, 'authentication': {'key': 'properties.authentication',", "a batch topology, this allows for video and audio to", "'str'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'time_sequences': {'key': 'timeSequences', 'type':", "Azure Video Analyzer player widget. Alternatively, this URL can be", "job represents a unique instance of a batch topology, used", "PrivateEndpointConnection(Resource): \"\"\"The Private Endpoint Connection resource. Variables are only populated", ":type video_name: str :param video_creation_properties: Optional video properties to be", "level resource which has 'tags' and a 'location'. Variables are", "resolution preview image URL. :type small: str :param medium: Medium", "type: Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase): \"\"\"A remote tunnel securely established using", ":param integration: Public network access for integration group. :type integration:", "location. Variables are only populated by the server, and will", "~video_analyzer.models.SystemData :param private_endpoint: The resource of private end point. :type", "be automatically played by the Azure Video Analyzer player widget.", "modification (UTC). :type last_modified_at: ~datetime.datetime \"\"\" _attribute_map = { 'created_by':", "low-latency streaming URL. The live content can be automatically played", "{'key': 'enableRegionalMdmAccount', 'type': 'bool'}, 'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'}, 'source_mdm_namespace':", "or ~video_analyzer.models.Kind :param sku: Required. Describes the properties of a", "be presented as part of the credentials. 
It is recommended", "IoT edge module through the Azure IoT Edge module twin", "} def __init__( self, **kwargs ): super(VideoPreviewImageUrls, self).__init__(**kwargs) self.small =", "self.type = kwargs['type'] self.description = kwargs.get('description', None) self.default = kwargs.get('default',", "retention_period: str \"\"\" _attribute_map = { 'retention_period': {'key': 'retentionPeriod', 'type':", "{'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly':", "resource within Azure Video Analyzer. Videos can be ingested from", "of PipelineTopology items. :type value: list[~video_analyzer.models.PipelineTopology] :param next_link: A link", "of the topology parameter declarations. Parameters declared here can be", "the Video Analyzer resource. All required parameters must be populated", "include: \"Pending\", \"Approved\", \"Rejected\". :type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param", "'tunnel', 'type': 'TunnelBase'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint',", "'small', 'type': 'str'}, 'medium': {'key': 'medium', 'type': 'str'}, 'large': {'key':", "and less than or equal to 300. If omitted, the", "certificates: list[str] \"\"\" _validation = { 'type': {'required': True}, 'certificates':", "and will be ignored when sending a request. :ivar type:", "'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type':", "rate of the input video. 
:type frame_rate: str :param scale:", "{'key': 'name', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'start_time':", "= kwargs.get('supported_aggregation_types', None) self.dimensions = None self.enable_regional_mdm_account = None self.source_mdm_account", "/manifest(format=m3u8-cmaf) - DASH CMAF: /manifest(format=mpd-time-cmaf) Moreover, an ongoing video recording", "in order to send to Azure. :param expiration_date: Required. The", "class for certificate sources. You probably want to use the", "public_network_access: str or ~video_analyzer.models.PublicNetworkAccess \"\"\" _attribute_map = { 'public_network_access': {'key':", "the server, and will be ignored when sending a request.", "Sku(msrest.serialization.Model): \"\"\"The SKU details. Variables are only populated by the", "is able to periodically connect to the cloud. A new", "by server. :type type: str :param kid: Required. JWT token", "{'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs':", "dimension. :vartype display_name: str :ivar to_be_exported_for_shoebox: Whether to export metric", "pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of", "for the pipeline job operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation", "Azure. :param type: Required. The identity type. :type type: str", "the video content URLs.\". Variables are only populated by the", "granularity of days, up to a maximum of 10 years.", "the encryption keys in Key Vault. Variables are only populated", ":param bitrate_kbps: The maximum bitrate, in kilobits per second or", "An optional description of the pipeline topology. It is recommended", "metric emitted by service. 
Variables are only populated by the", "{'key': 'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'}, }", "whether or not the video is currently being referenced be", "_validation = { 'type': {'required': True}, 'name': {'required': True}, 'video_name':", "A pipeline can only define or override parameters values for", "E.g. \"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\". :vartype type: str :ivar system_data: Azure", "JwtAuthentication. All required parameters must be populated in order to", "Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect", "'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'},", "HTTP connections alongside the RTSP messages. Possible values include: \"Http\",", "cameras' RTSP endpoints and credentials. Overall a topology is composed", "on JSON Web Tokens (JWT). All required parameters must be", "be ignored when sending a request. :ivar expiration_date: The content", "{ 'id': {'required': True}, 'identity': {'required': True}, 'status': {'readonly': True},", "to be skipped. Default is 'false'. :type ignore_signature: str \"\"\"", "_validation = { 'type': {'required': True}, 'certificates': {'required': True}, }", "Possible values include: \"ES256\", \"ES384\", \"ES512\". :type alg: str or", "'principal_id': {'key': 'principalId', 'type': 'str'}, } def __init__( self, **kwargs", "'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'}, 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},", "kwargs.get('retention_period', None) class VideoEncoderBase(msrest.serialization.Model): \"\"\"Base type for all video encoding", "Azure Video analyzer IoT edge module to be initialized and", "value: Parameter value to be applied on this specific pipeline.", "parameter declared in the pipeline topology. 
:type name: str :param", "and not this class directly. Known sub-classes are: PemCertificateList. All", "override parameters values for parameters which have been declared in", "str \"\"\" _validation = { 'type': {'required': True}, } _attribute_map", "RTP exchange: TCP or HTTP. When using TCP, the RTP", "'ranges', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoSequenceAbsoluteTimeMarkers,", "VideoCreationProperties(msrest.serialization.Model): \"\"\"Optional properties to be used in case a new", "'description': {'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},", "disable_rtsp_publishing: When set to 'true' the RTSP playback URL will", "If set to 'true', then \"disableRtspPublishing\" must be set to", "'state': {'readonly': True}, 'expiration': {'readonly': True}, 'error': {'readonly': True}, }", "module. All required parameters must be populated in order to", "'kid': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "in storage. It must be provided in the ISO8601 duration", "should be processed. You probably want to use the sub-classes", "'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState',", "resource. Video Analyzer relies on tables, queues, and blobs. The", "'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)", "PipelineTopology items. :type value: list[~video_analyzer.models.PipelineTopology] :param next_link: A link to", "populated in order to send to Azure. :param node_name: Required.", "this class directly. Known sub-classes are: VideoSequenceAbsoluteTimeMarkers. All required parameters", "resource. Possible values include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\". 
:type created_by_type:", "about the video and audio content. :param segment_length: Video segment", "authentication: ~video_analyzer.models.AuthenticationBase \"\"\" _validation = { 'id': {'readonly': True}, 'name':", "{'key': 'systemData', 'type': 'SystemData'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location':", "'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, } def __init__( self, **kwargs", "{'key': 'properties.authentication', 'type': 'AuthenticationBase'}, } def __init__( self, **kwargs ):", "Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data:", "edge module must be initialized and connected to the Internet", "self, **kwargs ): super(LivePipeline, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description", "} def __init__( self, **kwargs ): super(UserAssignedManagedIdentity, self).__init__(**kwargs) self.client_id =", "sub-classes and not this class directly. Known sub-classes are: PemCertificateList.", "VideoEncoderBase(msrest.serialization.Model): \"\"\"Base type for all video encoding presets, which define", "{'required': True}, } _attribute_map = { 'can_stream': {'key': 'canStream', 'type':", "\"\"\"A collection of AccessPolicyEntity items. :param value: A collection of", "token validation. 
You probably want to use the sub-classes and", "True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, 'expiration': {'readonly': True},", "} def __init__( self, **kwargs ): super(StorageAccount, self).__init__(**kwargs) self.id =", "super(NodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name =", "**kwargs ): super(EncoderPresetBase, self).__init__(**kwargs) self.type = None # type: Optional[str]", "None class EncoderPresetBase(msrest.serialization.Model): \"\"\"Base type for all encoder presets, which", "lived and it is only used for the initial handshake", "str or ~video_analyzer.models.CreatedByType :param created_at: The timestamp of resource creation", "credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for Video", ":type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for", "'width', 'type': 'str'}, 'mode': {'key': 'mode', 'type': 'str'}, } def", "{'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, }", "**kwargs ): super(RsaTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str", "default value must be defined. Topology parameters with a default", "ignored when sending a request. :param tags: A set of", "_validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'blob_duration':", "to 'true' causes the certificate subject name validation to be", "def __init__( self, **kwargs ): super(Properties, self).__init__(**kwargs) self.service_specification = None", "one or more data sources nodes such as an RTSP", "resource lives. :type location: str :param identity: The identities associated", "'aggregation_type': {'readonly': True}, 'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly': True}, 'enable_regional_mdm_account':", "{ 'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'}, } def __init__( self,", "MP4 file. 
The resulting MP4 file can be played on", "instance, a pipeline topology which captures content from a RTSP", "'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'}, } def __init__( self, **kwargs", "and not this class directly. Known sub-classes are: UsernamePasswordCredentials. All", "latency streaming. This is used, for example, when the topology", "\"live mode\" with latencies which are approximately double of the", ":type name: str :param type: The resource type. :type type:", "The desired expiration date of the registration token. The Azure", "to send to Azure. :param id: Required. The IoT Hub", "'iso-8601'}, 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, 'last_modified_by_type': {'key': 'lastModifiedByType', 'type':", "with the AAC codec. All required parameters must be populated", "sending a request. :ivar name: The name of the pipeline", "format (i.e. \"PT30S\" equals 30 seconds) and can vary between", "is kept in storage. Value must be specified in ISO8601", "self, **kwargs ): super(ErrorDetail, self).__init__(**kwargs) self.code = None self.message =", "algorithm. All required parameters must be populated in order to", "): super(CertificateSource, self).__init__(**kwargs) self.type = None # type: Optional[str] class", "value: A collection of PipelineJob items. :type value: list[~video_analyzer.models.PipelineJob] :param", "'id', 'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateEndpoint,", "declarations. Parameters declared here can be referenced throughout the topology", "**kwargs ): super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description =", "for encoding audio with the AAC codec. All required parameters", "str or ~video_analyzer.models.Kind :param sku: Describes the properties of a", "include: \"Pad\", \"PreserveAspectRatio\", \"Stretch\". 
:type mode: str or ~video_analyzer.models.VideoScaleMode \"\"\"", "message: str :ivar target: The error target. :vartype target: str", "and not this class directly. Known sub-classes are: VideoSink. All", ":vartype content_urls: ~video_analyzer.models.VideoContentUrls :param media_info: Contains information about the video", "is enabled, this results in a video of type 'archive'.", "video state. :vartype flags: ~video_analyzer.models.VideoFlags :ivar content_urls: Set of URLs", "or ES512. Possible values include: \"ES256\", \"ES384\", \"ES512\". :type alg:", "= None self.lock_aggregation_type = None self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) self.dimensions", ":param description: Optional description provided by the user. Value can", "value must be defined. Topology parameters with a default value", "video analyzer account. :param integration: Public network access for integration", "PrivateLinkServiceConnectionState(msrest.serialization.Model): \"\"\"A collection of information about the state of the", "video segment length. It is available when the video type", "{'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description':", "kwargs['node_name'] class Operation(msrest.serialization.Model): \"\"\"An operation. All required parameters must be", "self, **kwargs ): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class", "used on a pipeline node. All required parameters must be", "server. :type type: str :param certificates: Required. PEM formatted public", "details for failed operations. 
(This also follows the OData error", "None) class VideoEncoderH264(VideoEncoderBase): \"\"\"A custom preset for encoding video with", "def __init__( self, **kwargs ): super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id = None", "'keyIdentifier', 'type': 'str'}, 'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'}, } def", ":param name: Required. Operation identifier. :type name: str :param id:", "Only \"archive\" type videos can be streamed. :type can_stream: bool", "class StorageAccount(msrest.serialization.Model): \"\"\"The details about the associated storage account. Variables", ":param user_assigned_identities: The User Assigned Managed Identities. :type user_assigned_identities: dict[str,", "str \"\"\" _validation = { 'name': {'readonly': True}, 'display_name': {'readonly':", "as input of the current node. :type node_name: str \"\"\"", "encrypt the Account Key. Possible values include: \"SystemKey\", \"CustomerKey\". :type", "current_key_identifier: str \"\"\" _validation = { 'key_identifier': {'required': True}, 'current_key_identifier':", "= '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str class VideoEntity(ProxyResource): \"\"\"Represents a video", "Manager APIs to return error details for failed operations. (This", "\"Count\", \"Total\". :vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported", "str \"\"\" _attribute_map = { 'status': {'key': 'status', 'type': 'str'},", "kwargs['type'] self.description = kwargs.get('description', None) self.default = kwargs.get('default', None) class", "by the Video Analyzer resource. Variables are only populated by", "'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, } def __init__( self, **kwargs", "to 2048 characters long. :type description: str :ivar type_properties_type: Video", "and control access to specific video resources. 
Variables are only", "_attribute_map = { 'endpoint_url': {'key': 'endpointUrl', 'type': 'str'}, 'type': {'key':", "super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status = kwargs.get('status', None) self.description = kwargs.get('description', None)", "to the token expiration date. :vartype expiration_date: ~datetime.datetime :ivar token:", "parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List of the topology source nodes.", "the resource lives. :type location: str :param identity: The identities", "= kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class PipelineTopologyCollection(msrest.serialization.Model): \"\"\"A", "self, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = kwargs['name'] self.display =", "for the live pipeline operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation", "The name of the resource for which availability needs to", "overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] \"\"\" _validation = { 'id': {'readonly':", "SourceNodeBase. All required parameters must be populated in order to", "preset. Possible values include: \"SingleLayer_540p_H264_AAC\", \"SingleLayer_720p_H264_AAC\", \"SingleLayer_1080p_H264_AAC\", \"SingleLayer_2160p_H264_AAC\". :type name:", "= None # type: Optional[str] class TokenKey(msrest.serialization.Model): \"\"\"Key properties for", "Required. The operation name. :type name: str :param display: The", "{'key': 'createdBy', 'type': 'str'}, 'created_by_type': {'key': 'createdByType', 'type': 'str'}, 'created_at':", "kwargs.get('user_assigned_identities', None) class VideoAnalyzerOperationStatus(msrest.serialization.Model): \"\"\"Status of video analyzer operation. All", "'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'role': {'key': 'properties.role',", "live pipeline. 
Variables are only populated by the server, and", "def __init__( self, **kwargs ): super(NodeInput, self).__init__(**kwargs) self.node_name = kwargs['node_name']", "str :param type: The resource type. :type type: str \"\"\"", "The principal ID. :vartype principal_id: str \"\"\" _validation = {", "= { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name',", "= '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str self.alg = kwargs['alg'] self.n =", "str \"\"\" _validation = { 'user_assigned_identity': {'required': True}, } _attribute_map", "self, **kwargs ): super(VideoContentUrls, self).__init__(**kwargs) self.download_url = kwargs.get('download_url', None) self.archive_base_url", "status of the Key Vault mapping. :vartype status: str \"\"\"", "for this node. :type inputs: list[~video_analyzer.models.NodeInput] \"\"\" _validation = {", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str class UserAssignedManagedIdentity(msrest.serialization.Model): \"\"\"The", "self.type = None # type: Optional[str] class CertificateSource(msrest.serialization.Model): \"\"\"Base class", "'end_time': {'key': 'endTime', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'},", "can vary between 30 seconds to 5 minutes, in 30", "The URL of the Key Vault key used to encrypt", "RSA algorithm. All required parameters must be populated in order", "'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'state': {'key':", "'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} } def __init__( self, **kwargs ): super(ProcessorNodeBase,", "~video_analyzer.models.OperationDisplay :param origin: Origin of the operation. :type origin: str", "content. :param segment_length: Video segment length indicates the length of", "= kwargs.get('large', None) class VideoPublishingOptions(msrest.serialization.Model): \"\"\"Optional flags used to change", "is available. 
:type name_available: bool :param reason: The reason why", "directly. Known sub-classes are: SecureIotDeviceRemoteTunnel. All required parameters must be", "content. :vartype content_urls: ~video_analyzer.models.VideoContentUrls :param media_info: Contains information about the", "'type', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'default': {'key':", "instance level parameter values for the user-defined topology parameters. A", "kwargs.get('ingestion', None) self.consumption = kwargs.get('consumption', None) class NodeInput(msrest.serialization.Model): \"\"\"Describes an", "'@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def", "node_name: str \"\"\" _validation = { 'node_name': {'required': True}, }", "str :param disable_rtsp_publishing: When set to 'true' the RTSP playback", "pipelines can be streamed through Azure Video Analyzer Player Widget", "): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkResource(Resource): \"\"\"A", "'type': 'Sku'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'parameters': {'key': 'properties.parameters',", "kwargs['name'] self.id = kwargs.get('id', None) self.start_time = kwargs.get('start_time', None) self.end_time", "} def __init__( self, **kwargs ): super(PipelineTopology, self).__init__(**kwargs) self.kind =", "kwargs.get('validation_options', None) class TlsValidationOptions(msrest.serialization.Model): \"\"\"Options for controlling the validation of", "\"\"\"Defines the parameter value of an specific pipeline topology parameter.", "The name of the pipeline job operation. 
:vartype name: str", "= kwargs['credentials'] self.url = kwargs['url'] self.tunnel = kwargs.get('tunnel', None) class", "{ 'id': {'key': 'id', 'type': 'str'}, } def __init__( self,", "endpoint that the pipeline can connect to over clear transport", "} def __init__( self, **kwargs ): super(VideoArchival, self).__init__(**kwargs) self.retention_period =", "'username': {'required': True}, 'password': {'required': True}, } _attribute_map = {", "source. :type video_name: str :param time_sequences: Required. Describes a sequence", "content. :param download_url: Video file download URL. This URL can", "super(CredentialsBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class TokenKey(msrest.serialization.Model):", "'#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates = kwargs.get('trusted_certificates', None) self.validation_options =", "set to 'true' causes the certificate subject name validation to", "topology_name: Reference to an existing pipeline topology. When activated, this", "'value': {'key': 'value', 'type': '[VideoEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'},", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase':", "The source MDM namespace. :vartype source_mdm_namespace: str :ivar supported_time_grain_types: The", ":vartype service_specification: ~video_analyzer.models.ServiceSpecification \"\"\" _validation = { 'service_specification': {'readonly': True},", "used in a batch topology, this allows for video and", "user-defined topology parameters. 
A pipeline can only define or override", "'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs)", "'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'},", "_attribute_map = { 'download_url': {'key': 'downloadUrl', 'type': 'str'}, 'archive_base_url': {'key':", "{ 'type': {'required': True}, 'credentials': {'required': True}, 'url': {'required': True},", "of information about the state of the connection between service", "'info': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type',", "kwargs.get('kind', None) self.sku = kwargs.get('sku', None) self.description = kwargs.get('description', None)", "The resulting MP4 file can be played on any standard", "'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar edge_module_id: Internal", ":type name: str or ~video_analyzer.models.EncoderSystemPresetType \"\"\" _validation = { 'type':", "sub-classes and not this class directly. Known sub-classes are: RtspSource,", "'type': {'readonly': True}, 'system_data': {'readonly': True}, 'provisioning_state': {'readonly': True}, }", "device id to use when establishing the remote tunnel. This", "updated at any time and the new desired retention period", "class Sku(msrest.serialization.Model): \"\"\"The SKU details. Variables are only populated by", "def __init__( self, **kwargs ): super(VideoAnalyzer, self).__init__(**kwargs) self.identity = kwargs.get('identity',", ":ivar endpoints: The endpoints associated with this resource. :vartype endpoints:", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str self.inputs = kwargs['inputs']", "List of expected token audiences. 
Token audience is valid if", "None class PrivateEndpointConnectionListResult(msrest.serialization.Model): \"\"\"List of private endpoint connection associated with", "format. :type properties: ~video_analyzer.models.Properties :param is_data_action: Whether the operation applies", "None self.display_name = None self.to_be_exported_for_shoebox = None class MetricSpecification(msrest.serialization.Model): \"\"\"A", "not public network access is allowed for resources under the", "self.display_name = None self.to_be_exported_for_shoebox = None class MetricSpecification(msrest.serialization.Model): \"\"\"A metric", "True}, 'name': {'required': True}, 'inputs': {'required': True}, 'preset': {'required': True},", "self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.location = kwargs['location'] class UnsecuredEndpoint(EndpointBase):", "True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, } _attribute_map =", "when authenticating a TLS connection. By default, strict validation is", "str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when this", "} def __init__( self, **kwargs ): super(PipelineJobError, self).__init__(**kwargs) self.code =", "URLs can be used in conjunction with the video content", "= kwargs.get('bitrate_kbps', None) self.state = None self.parameters = kwargs.get('parameters', None)", "{'required': True}, 'tier': {'readonly': True}, } _attribute_map = { 'name':", "is short lived and it is only used for the", "for tokens generated with Elliptical Curve algorithm. All required parameters", "{'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, 'preset': {'required':", "Topology kind. Possible values include: \"Live\", \"Batch\". :type kind: str", "encoding video with the H.264 (AVC) codec. All required parameters", "request. 
:ivar expiration_date: The content token expiration date in ISO8601", "system_data: ~video_analyzer.models.SystemData :param role: Defines the access level granted by", "in order to send to Azure. :param type: Required. The", "If the RTSP camera exceeds this capacity, then the service", "there can be only one range specified in the sequence.", ":param device_id: Required. The IoT device id to use when", "archival: ~video_analyzer.models.VideoArchival \"\"\" _validation = { 'id': {'readonly': True}, 'name':", "not defined in the pipelines. All required parameters must be", "'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'state': {'key': 'properties.state', 'type':", "self.archive_base_url = kwargs.get('archive_base_url', None) self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None) self.preview_image_urls =", "= { 'download_url': {'key': 'downloadUrl', 'type': 'str'}, 'archive_base_url': {'key': 'archiveBaseUrl',", "type: str :param certificates: Required. PEM formatted public certificates. One", "aggregation_type: The metric aggregation type. Possible values include: \"Average\", \"Count\",", "camera bitrate is now below the reserved capacity. Doing so", "parameters can and must be referenced throughout the topology and", "{ 'height': {'key': 'height', 'type': 'str'}, 'width': {'key': 'width', 'type':", ":type parameters: list[~video_analyzer.models.ParameterDefinition] \"\"\" _validation = { 'id': {'readonly': True},", "'bitrateKbps', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'}", "__init__( self, **kwargs ): super(VideoPublishingOptions, self).__init__(**kwargs) self.disable_archive = kwargs.get('disable_archive', None)", "is from 500 to 3000 Kbps in increments of 100", "VideoEntityCollection(msrest.serialization.Model): \"\"\"A collection of VideoEntity items. 
:param value: A collection", "# type: Optional[str] class CertificateSource(msrest.serialization.Model): \"\"\"Base class for certificate sources.", "'type': 'str'}, 'large': {'key': 'large', 'type': 'str'}, } def __init__(", "str :param medium: Medium resolution preview image URL. :type medium:", "dynamic properties based on the current video state. All required", "'str'}, 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'}, 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type':", "archival: Video archival properties. :type archival: ~video_analyzer.models.VideoArchival \"\"\" _validation =", "as part of the credentials. It is recommended that this", "} def __init__( self, **kwargs ): super(ProxyResource, self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource):", "error details for failed operations. (This also follows the OData", "'y', 'type': 'str'}, } def __init__( self, **kwargs ): super(EccTokenKey,", "The private link resource required member names. :vartype required_members: list[str]", "**kwargs ): super(OperationCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class OperationDisplay(msrest.serialization.Model):", "'flags': {'key': 'properties.flags', 'type': 'VideoFlags'}, 'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'},", "provider. :type provider: str :param resource: Resource on which the", "{'readonly': True}, 'system_data': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map", "operation: str :param description: The operation description. 
:type description: str", "= None # type: Optional[str] class EncoderCustomPreset(EncoderPresetBase): \"\"\"Describes a custom", "_validation = { 'type': {'required': True}, 'status': {'readonly': True}, }", "kwargs.get('display', None) self.origin = kwargs.get('origin', None) self.properties = kwargs.get('properties', None)", "self).__init__(**kwargs) self.private_endpoint = kwargs.get('private_endpoint', None) self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None) self.provisioning_state", "value: list[~video_analyzer.models.VideoEntity] :param next_link: A link to the next page", "display_name: str :ivar display_description: The metric display description. :vartype display_description:", "= kwargs['name'] self.display = kwargs.get('display', None) self.origin = kwargs.get('origin', None)", "which will be auto-rotated as long as the module is", "class TokenClaim(msrest.serialization.Model): \"\"\"Properties for expected token claims. All required parameters", "all the cameras. Individual instance properties can be defined through", "topology. It is recommended that the expected use of the", "'archive' and preview images are enabled. :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls \"\"\"", "for topologies where \"kind\" is set to \"live\". :type video_publishing_options:", "_validation = { 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections':", "created videos have this value set to false. :type has_data:", "class directly. Known sub-classes are: EncoderCustomPreset, EncoderSystemPreset. All required parameters", "allows for video and audio to be stored as a", "filled by server. :type type: str :param iot_hub_name: Required. Name", "str \"\"\" _attribute_map = { 'disable_archive': {'key': 'disableArchive', 'type': 'str'},", "to the Azure Video Analyzer IoT edge module through the", "results to return in one response). 
:type next_link: str \"\"\"", "the input content should be processed. You probably want to", "= { 'type': {'key': 'type', 'type': 'str'}, 'info': {'key': 'info',", "example, if this is set to P30D (30 days), content", "can be streamed. :type can_stream: bool :param has_data: Required. Value", "sending a request. :ivar id: Fully qualified resource ID for", "is available when the video type is 'archive' and a", "pipeline can connect to over clear transport (no encryption in", "PrivateEndpoint(msrest.serialization.Model): \"\"\"The Private Endpoint resource. Variables are only populated by", "'type': 'str'}, } def __init__( self, **kwargs ): super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs)", ":type type: str :param name: Required. Node name. Must be", "'name': {'key': 'name', 'type': 'str'}, 'transport': {'key': 'transport', 'type': 'str'},", "= { 'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'}, } def __init__(", "the service will disconnect temporarily from the camera. It will", "'@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(AccessPolicyEntityCollection,", "and must be referenced throughout the topology and can optionally", "'@type', 'type': 'str'}, 'ranges': {'key': 'ranges', 'type': 'str'}, } def", "= kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints = None", "only for archiving content. Default is 'false'. If set to", "**kwargs ): super(VideoFlags, self).__init__(**kwargs) self.can_stream = kwargs['can_stream'] self.has_data = kwargs['has_data']", "ignored when sending a request. :ivar id: Fully qualified resource", "second) of the encoded video. The value must be greater", "Name of the Video Analyzer video resource to be used", "long the video is kept in storage. Value must be", "Video Analyzer IoT edge module through the Azure IoT Edge", "of the endpoint. :type endpoint_url: str :param type: Required. 
The", "the video archive in different resolutions. They are available when", "is enabled. :vartype enable_regional_mdm_account: bool :ivar source_mdm_account: The source MDM", "code. :type code: str :param message: The error message. :type", "\"\"\"The details about the associated storage account. Variables are only", "self.resource = kwargs.get('resource', None) self.operation = kwargs.get('operation', None) self.description =", "Required. The desired expiration date of the registration token. The", "that Azure Video Analyzer's list of trusted authorities should be", "module to be initialized and authorized to the cloud account.", "which are persisted to storage. Smaller segments provide lower archive", "} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display':", "'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},", "= None self.status = None self.error = None class PipelineJobUpdate(ProxyResource):", "self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters', None) self.sources =", "None) self.created_at = kwargs.get('created_at', None) self.last_modified_by = kwargs.get('last_modified_by', None) self.last_modified_by_type", "allows for a single instance of Azure Video analyzer IoT", "self, **kwargs ): super(PrivateEndpoint, self).__init__(**kwargs) self.id = None class PrivateEndpointConnection(Resource):", "video content URL as the value for the \"token\" query", "kwargs['credentials'] self.url = kwargs['url'] self.tunnel = kwargs.get('tunnel', None) class ErrorAdditionalInfo(msrest.serialization.Model):", "display_name: str :ivar to_be_exported_for_shoebox: Whether to export metric to shoebox.", "will be ignored when sending a request. :ivar id: The", "account, including the key version. 
:vartype current_key_identifier: str \"\"\" _validation", "'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", "to create the video resource can lead to errors when", "items. :param value: A collection of PipelineJob items. :type value:", "self, **kwargs ): super(SystemData, self).__init__(**kwargs) self.created_by = kwargs.get('created_by', None) self.created_by_type", "self.video_encoder = kwargs.get('video_encoder', None) class NodeBase(msrest.serialization.Model): \"\"\"Base class for nodes.", "None class ResourceIdentity(msrest.serialization.Model): \"\"\"The user assigned managed identity to use", "mode is 'Pad'. If the mode is 'Pad' or 'Stretch'", "\"RS384\", \"RS512\". :type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo :param n: Required.", "kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model): \"\"\"The service metric specifications. Variables are only", "start_time: str :param end_time: Operation end time. :type end_time: str", "the video. Newly created videos have this value set to", ":type value: list[~video_analyzer.models.Operation] \"\"\" _attribute_map = { 'value': {'key': 'value',", "be skipped. Default is 'false'. :type ignore_hostname: str :param ignore_signature:", "topologies where \"kind\" is set to \"live\". :param disable_archive: When", "str :param credentials: Required. Credentials to be presented to the", "VideoAnalyzer(TrackedResource): \"\"\"The Video Analyzer account. Variables are only populated by", "encryption: The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs:", ":param status: Operation status. :type status: str :param error: The", "sending a request. :ivar name: The metric dimension name. 
:vartype", "video content authorization token to download the video MP4 file.", "{'readonly': True}, 'system_data': {'readonly': True}, 'location': {'required': True}, } _attribute_map", "{'required': True}, 'value': {'required': True}, } _attribute_map = { 'name':", "ensure that one 'noisy neighbor' does not affect other live", "{'key': 'token', 'type': 'str'}, } def __init__( self, **kwargs ):", "can only define or override parameters values for parameters which", "not there has ever been data recorded or uploaded into", "__init__( self, **kwargs ): super(VideoFlags, self).__init__(**kwargs) self.can_stream = kwargs['can_stream'] self.has_data", "content. Note: if downstream of RTSP source, and if disableArchive", ":param transport: Network transport utilized by the RTSP and RTP", "The archived content can be automatically played by the Azure", "optional description of the pipeline topology. It is recommended that", "{'key': 'startTime', 'type': 'str'}, 'end_time': {'key': 'endTime', 'type': 'str'}, 'status':", "resource for which availability needs to be checked. :type name:", ":type status: str :param error: The error detail. :type error:", "Video Analyzer video resource to be used as the source.", "'@type', 'type': 'str'}, 'username': {'key': 'username', 'type': 'str'}, 'password': {'key':", "for Video Analyzer to connect to. :type url: str :param", "ignored when sending a request. :ivar expiration_date: The expiration date", "kwargs['kind'] self.sku = kwargs['sku'] self.description = kwargs.get('description', None) self.parameters =", "{'key': 'actionsRequired', 'type': 'str'}, } def __init__( self, **kwargs ):", "the mode is 'PreserveAspectRatio' then only one of width or", "sub-classes and not this class directly. Known sub-classes are: EncoderProcessor.", "zone name. 
:type required_zone_names: list[str] \"\"\" _validation = { 'id':", "None) self.validation_options = kwargs.get('validation_options', None) class TlsValidationOptions(msrest.serialization.Model): \"\"\"Options for controlling", "'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value", "'type': 'str'}, } def __init__( self, **kwargs ): super(TrackedResource, self).__init__(**kwargs)", "'type': '[LivePipeline]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__(", "CertificateSource(msrest.serialization.Model): \"\"\"Base class for certificate sources. You probably want to", "token can be generated for the same IoT edge module", "name: Required. Node name. Must be unique within the topology.", "= kwargs.get('archive_base_url', None) self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None) self.preview_image_urls = kwargs.get('preview_image_urls',", "encrypted. Variables are only populated by the server, and will", "): super(RsaTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str self.alg", "self).__init__(**kwargs) self.created_by = kwargs.get('created_by', None) self.created_by_type = kwargs.get('created_by_type', None) self.created_at", "True}, 'iot_hub_name': {'required': True}, 'device_id': {'required': True}, } _attribute_map =", "{'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map", "to validate access tokens. 
Having multiple keys allow for seamless", "__init__( self, **kwargs ): super(AccessPolicyEntity, self).__init__(**kwargs) self.role = kwargs.get('role', None)", "class VideoContentToken(msrest.serialization.Model): \"\"\"\"Video content token grants access to the video", "\"\"\" _validation = { 'name': {'required': True}, 'tier': {'readonly': True},", "self.tunnel = kwargs.get('tunnel', None) class ErrorAdditionalInfo(msrest.serialization.Model): \"\"\"The resource management error", "self).__init__(**kwargs) self.user_assigned_identity = kwargs['user_assigned_identity'] class RsaTokenKey(TokenKey): \"\"\"Required validation properties for", "self.authentication = kwargs.get('authentication', None) class AccessPolicyEntityCollection(msrest.serialization.Model): \"\"\"A collection of AccessPolicyEntity", "_attribute_map = { 'value': {'key': 'value', 'type': '[PrivateLinkResource]'}, } def", "= None class StorageAccount(msrest.serialization.Model): \"\"\"The details about the associated storage", "availability request body. :param name: The name of the resource", "= None self.name = None self.type = None self.system_data =", "kwargs['inputs'] class Sku(msrest.serialization.Model): \"\"\"The SKU details. Variables are only populated", "'location': {'required': True}, } _attribute_map = { 'id': {'key': 'id',", "{'readonly': True}, 'aggregation_type': {'readonly': True}, 'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly':", "to 'true' causes the certificate chain trust validation to be", "'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map = { 'type':", "{'readonly': True}, 'display_name': {'readonly': True}, 'to_be_exported_for_shoebox': {'readonly': True}, } _attribute_map", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates = kwargs.get('trusted_certificates',", "and credentials. Overall a topology is composed of the following:", "content is archived. 
:type video_name: str :param video_creation_properties: Optional video", "VideoPublishingOptions(msrest.serialization.Model): \"\"\"Optional flags used to change how video is published.", "when the video type is 'archive' and video archiving is", "\"kind\" is set to \"live\". :type segment_length: str :param retention_period:", "for parameters which have been declared in the referenced topology.", "class Operation(msrest.serialization.Model): \"\"\"An operation. All required parameters must be populated", "{'required': True}, 'certificates': {'required': True}, } _attribute_map = { 'type':", "= '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str self.preset = kwargs['preset'] class EncoderSystemPreset(EncoderPresetBase):", "'target', 'type': 'str'}, 'details': {'key': 'details', 'type': '[ErrorDetail]'}, 'additional_info': {'key':", "__init__( self, **kwargs ): super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None)", "If omitted, the encoder uses the average frame rate of", "a firewall. :type tunnel: ~video_analyzer.models.TunnelBase \"\"\" _validation = { 'type':", "type: str self.issuers = kwargs.get('issuers', None) self.audiences = kwargs.get('audiences', None)", "is 'Pad'. If the mode is 'Pad' or 'Stretch' then", "'status': {'key': 'status', 'type': 'str'}, } def __init__( self, **kwargs", "established using IoT Hub device information. All required parameters must", "true, then no content is archived. :type video_name: str :param", "'name': {'key': 'name', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'},", "Analyzer video resource to be used as the source. :type", "'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'},", "the RTSP and RTP exchange: TCP or HTTP. When using", "Video Analyzer account. Possible values include: \"Failed\", \"InProgress\", \"Succeeded\". 
:vartype", "} def __init__( self, **kwargs ): super(CertificateSource, self).__init__(**kwargs) self.type =", "JWT token validation. You probably want to use the sub-classes", "{'key': 'id', 'type': 'str'}, } def __init__( self, **kwargs ):", "'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'}, 'processors': {'key':", "{'readonly': True}, 'error': {'readonly': True}, } _attribute_map = { 'name':", ":type value: str \"\"\" _validation = { 'name': {'required': True},", "= { 'expiration_date': {'required': True}, } _attribute_map = { 'expiration_date':", "on any compatible DASH or HLS players by appending the", "super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind = kwargs.get('kind', None) self.sku = kwargs.get('sku', None)", "str :param alg: Required. Elliptical curve algorithm to be used:", "= kwargs['ranges'] class VideoSink(SinkNodeBase): \"\"\"Video sink in a live topology", "resource to be ingested into a pipeline. Currently supported only", "kwargs.get('endpoint_url', None) self.type = kwargs['type'] class EndpointBase(msrest.serialization.Model): \"\"\"Base class for", "Internal ID generated for the instance of the Video Analyzer", "RtspSource, VideoSource. All required parameters must be populated in order", "sending a request. :ivar name: The metric name. 
:vartype name:", "'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } def __init__(", "{'key': 'validationOptions', 'type': 'TlsValidationOptions'}, } def __init__( self, **kwargs ):", "'display_name': {'key': 'displayName', 'type': 'str'}, 'blob_duration': {'key': 'blobDuration', 'type': 'str'},", "): super(PipelineJobUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description',", "'#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates = kwargs['certificates'] class PipelineJob(ProxyResource): \"\"\"Pipeline", "'@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineTopologyCollection,", "is 'Pad' or 'Stretch' then both width and height must", "if changes on the service provider require any updates on", "'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, 'token': {'key': 'token', 'type': 'str'},", "type: Optional[str] self.credentials = kwargs['credentials'] self.url = kwargs['url'] self.tunnel =", "claim to be present on the token. :type value: str", "connect to. :type url: str :param tunnel: Describes the tunnel", "{'key': 'bitrateKbps', 'type': 'str'}, } def __init__( self, **kwargs ):", "= { 'client_id': {'readonly': True}, 'principal_id': {'readonly': True}, } _attribute_map", "a Video Analyzer video resource to be ingested into a", "self.lock_aggregation_type = None self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) self.dimensions = None", "to encrypt the account. The key may either be versioned", "'error': {'key': 'properties.error', 'type': 'PipelineJobError'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},", ":ivar name: The name of the resource. 
:vartype name: str", "'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} } def __init__(", "send to Azure. :param name: Required. Name of the claim", "\"\"\"Status of private endpoint connection operation. All required parameters must", "the topology. :type name: str :param transport: Network transport utilized", "Whether the operation applies to data-plane. :type is_data_action: bool :param", "**kwargs ): super(Properties, self).__init__(**kwargs) self.service_specification = None class ResourceIdentity(msrest.serialization.Model): \"\"\"The", "'segmentLength', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoMediaInfo,", "'created_by': {'key': 'createdBy', 'type': 'str'}, 'created_by_type': {'key': 'createdByType', 'type': 'str'},", "class MetricSpecification(msrest.serialization.Model): \"\"\"A metric emitted by service. Variables are only", "when the video type is 'archive' and preview images are", "information. :vartype system_data: ~video_analyzer.models.SystemData :param private_endpoint: The resource of private", "presets, which define the recipe or instructions on how the", "re-establish connection (with exponential backoff), checking to see if the", "self, **kwargs ): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type:", "= None # type: Optional[str] class TlsEndpoint(EndpointBase): \"\"\"TLS endpoint describes", "the topology is used only for archiving content. Default is", "iot_hub_name: Required. Name of the IoT Hub. 
:type iot_hub_name: str", "'password', 'type': 'str'}, } def __init__( self, **kwargs ): super(UsernamePasswordCredentials,", "} def __init__( self, **kwargs ): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value =", "~video_analyzer.models.TimeSequenceBase \"\"\" _validation = { 'type': {'required': True}, 'name': {'required':", "the video content authorization token to download the video MP4", "this is set to P30D (30 days), content older than", "Topology parameters without a default value must be defined. Topology", "with RSA algorithm. All required parameters must be populated in", "created_at: ~datetime.datetime :param last_modified_by: The identity that last modified the", "True}, 'display_description': {'readonly': True}, 'unit': {'readonly': True}, 'aggregation_type': {'readonly': True},", "processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of the topology sink nodes.", "AudioEncoderAac. All required parameters must be populated in order to", "= kwargs['iot_hub_name'] self.device_id = kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model): \"\"\"The service metric", "'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map = {", "this class directly. Known sub-classes are: AudioEncoderAac. All required parameters", "): super(VideoPublishingOptions, self).__init__(**kwargs) self.disable_archive = kwargs.get('disable_archive', None) self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing',", "= None class SinkNodeBase(NodeBase): \"\"\"Base class for topology sink nodes.", "status of the Iot Hub mapping. :vartype status: str \"\"\"", "enabled. :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls \"\"\" _attribute_map = { 'download_url': {'key':", "of URLs to the video content. :vartype content_urls: ~video_analyzer.models.VideoContentUrls :param", "message: The error message. 
:vartype message: str :ivar target: The", "for the pipeline. :type description: str :param bitrate_kbps: Maximum bitrate", "} def __init__( self, **kwargs ): super(PrivateLinkResource, self).__init__(**kwargs) self.group_id =", "100 Kbps. If the RTSP camera exceeds this capacity, then", "is published. These are only allowed for topologies where \"kind\"", "{'key': '@type', 'type': 'str'}, 'issuers': {'key': 'issuers', 'type': '[str]'}, 'audiences':", "self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None) self.provisioning_state = None class PrivateEndpointConnectionListResult(msrest.serialization.Model): \"\"\"List", "'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, } def __init__( self,", ":type type: str :param audio_encoder: Describes a custom preset for", "the most recent still image from the video archive in", "as an RTSP source which allows for content to be", "= kwargs.get('required_zone_names', None) class PrivateLinkResourceListResult(msrest.serialization.Model): \"\"\"A list of private link", "None) class PrivateLinkServiceConnectionState(msrest.serialization.Model): \"\"\"A collection of information about the state", "-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. #", "the sub-classes and not this class directly. Known sub-classes are:", "self.error = None class PipelineJobUpdate(ProxyResource): \"\"\"Pipeline job represents a unique", "self.encryption = kwargs.get('encryption', None) self.iot_hubs = kwargs.get('iot_hubs', None) self.public_network_access =", "The Key Vault identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The", "validation to be skipped. Default is 'false'. 
:type ignore_hostname: str", "): super(ParameterDefinition, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs.get('value', None)", "# type: str class RtspSource(SourceNodeBase): \"\"\"RTSP source allows for media", "published via a video resource. If archiving is enabled, this", "str :param user_assigned_identities: The User Assigned Managed Identities. :type user_assigned_identities:", "'blobDuration', 'type': 'str'}, } def __init__( self, **kwargs ): super(LogSpecification,", "'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(VideoAnalyzerUpdate,", "= kwargs.get('claims', None) self.keys = kwargs.get('keys', None) class KeyVaultProperties(msrest.serialization.Model): \"\"\"The", "Public network access for integration group. :type integration: ~video_analyzer.models.GroupLevelAccessControl :param", "): super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id = None class EdgeModuleEntityCollection(msrest.serialization.Model): \"\"\"A collection", "be a Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage). :type", "retention period will be effective within 24 hours. :type retention_period:", "incorrect behavior and will be lost if the code is", "'SystemData'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id", "key version. :vartype current_key_identifier: str \"\"\" _validation = { 'key_identifier':", "str :param message: The error message. :type message: str \"\"\"", ":param height: The desired output video height. :type height: str", "Known sub-classes are: EncoderCustomPreset, EncoderSystemPreset. All required parameters must be", "\"kind\" is set to \"live\". 
:type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions \"\"\" _validation", "'str'}, } def __init__( self, **kwargs ): super(VideoPreviewImageUrls, self).__init__(**kwargs) self.small", "{'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, }", "} def __init__( self, **kwargs ): super(PrivateEndpoint, self).__init__(**kwargs) self.id =", "Known sub-classes are: UsernamePasswordCredentials. All required parameters must be populated", "audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes a custom preset for encoding", "The live content can be automatically played by the Azure", "{ 'value': {'key': 'value', 'type': '[Operation]'}, } def __init__( self,", "network access control. :param public_network_access: Whether or not public network", "the topology processor nodes. Processor nodes enable pipeline data to", "values include: \"Bytes\", \"Count\", \"Milliseconds\". :vartype unit: str or ~video_analyzer.models.MetricUnit", "A new provisioning token can be generated for the same", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar group_id: The private", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} } def", "The video source only picks up recorded media within these", "~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer", "= { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, } def __init__(", "token itself is short lived and it is only used", "It is available when the video type is 'archive' and", "throughout the topology and can optionally have default values to", "Optional[str] self.kid = kwargs['kid'] class EccTokenKey(TokenKey): \"\"\"Required validation properties for", "not this class directly. Known sub-classes are: UsernamePasswordCredentials. 
All required", "True}, 'details': {'readonly': True}, 'additional_info': {'readonly': True}, } _attribute_map =", "persisted to storage. Smaller segments provide lower archive playback latency", "or instructions on how the input content should be processed.", "\"${PARAMETER_NAME}\" string pattern. Parameters can have optional default values and", "= { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, 'token': {'key': 'token',", "Array of private link resources. :type value: list[~video_analyzer.models.PrivateLinkResource] \"\"\" _attribute_map", ":param trusted_certificates: List of trusted certificate authorities when authenticating a", "containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData \"\"\" _validation", "value: A collection of AccessPolicyEntity items. :type value: list[~video_analyzer.models.AccessPolicyEntity] :param", "encoder uses the average frame rate of the input video.", "current status of the Iot Hub mapping. :vartype status: str", "optional description for the pipeline. :type description: str :ivar state:", "and will be ignored when sending a request. :ivar service_specification:", "the Azure Video Analyzer player widget. Alternatively, this URL can", "An optional description for the pipeline. :type description: str :ivar", "def __init__( self, **kwargs ): super(VideoSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSource'", "how the input content should be processed. You probably want", "maximum of 10 years. For example, if this is set", "self).__init__(**kwargs) self.name = kwargs['name'] self.tier = None class StorageAccount(msrest.serialization.Model): \"\"\"The", "be specified in ISO8601 duration format (i.e. \"PT30S\" equals 30", "Hub details. Variables are only populated by the server, and", "modifiedBy information. 
:vartype system_data: ~video_analyzer.models.SystemData :ivar edge_module_id: Internal ID generated", "'name': {'required': True}, 'endpoint': {'required': True}, } _attribute_map = {", "is only used for the initial handshake between IoT edge", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'}, }", "include: \"RS256\", \"RS384\", \"RS512\". :type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo :param", "None self.display_description = None self.unit = None self.aggregation_type = None", "str :ivar target: The error target. :vartype target: str :ivar", "112, 128, 160, 192, 224, and 256. If omitted, the", "def __init__( self, **kwargs ): super(PipelineJobCollection, self).__init__(**kwargs) self.value = kwargs.get('value',", "= kwargs['value'] class TrackedResource(Resource): \"\"\"The resource model definition for an", "\"\"\"Required validation properties for tokens generated with Elliptical Curve algorithm.", "type: Optional[str] class EncoderCustomPreset(EncoderPresetBase): \"\"\"Describes a custom preset for encoding", "10 years, in 1 day increments. When absent (null), all", "ErrorResponse(msrest.serialization.Model): \"\"\"Common error response for all Azure Resource Manager APIs", "to the video content. :param download_url: Video file download URL.", "\"disableRtspPublishing\" must be set to 'false'. :type disable_archive: str :param", "__init__( self, **kwargs ): super(PipelineJobError, self).__init__(**kwargs) self.code = kwargs.get('code', None)", "nodes. Source nodes enable external data to be ingested by", "node. :type inputs: list[~video_analyzer.models.NodeInput] :param video_name: Required. Name of a", "collection of VideoEntity items. :param value: A collection of VideoEntity", "be populated in order to send to Azure. :param type:", "self, **kwargs ): super(PipelineTopology, self).__init__(**kwargs) self.kind = kwargs['kind'] self.sku =", "edge module. 
:vartype edge_module_id: str \"\"\" _validation = { 'id':", "The URL of the endpoint. :type endpoint_url: str :param type:", "list[~video_analyzer.models.SourceNodeBase] :param processors: List of the topology processor nodes. Processor", ":type video_name: str :param time_sequences: Required. Describes a sequence of", "class VideoMediaInfo(msrest.serialization.Model): \"\"\"Contains information about the video and audio content.", "name: str :param value: Parameter value to be applied on", ":vartype name: str :ivar status: The status of the live", "output video width. :type width: str :param mode: Describes the", "allows for content from a Video Analyzer video resource to", "issuer is valid if it matches at least one of", "} def __init__( self, **kwargs ): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname =", "_attribute_map = { 'segment_length': {'key': 'segmentLength', 'type': 'str'}, } def", "JSON Web Tokens (JWT). All required parameters must be populated", "audio should be processed. You probably want to use the", "to the pipeline topology definition. :type topology_name: str :param description:", "} def __init__( self, **kwargs ): super(MetricSpecification, self).__init__(**kwargs) self.name =", "inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] \"\"\" _validation =", "identity for the Video Analyzer resource. All required parameters must", "edge module. 
All required parameters must be populated in order", ":type retention_period: str \"\"\" _attribute_map = { 'retention_period': {'key': 'retentionPeriod',", "error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name': {'required': True}, }", "expiration_date: The content token expiration date in ISO8601 format (eg.", "'@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'}", "case a new video resource needs to be created on", "can be played in \"live mode\" with latencies which are", ":type issuers: list[str] :param audiences: List of expected token audiences.", "to the cloud. A new provisioning token can be generated", "derived types.Constant filled by server. :type type: str :param iot_hub_name:", "_attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, } def", "'video_name': {'key': 'videoName', 'type': 'str'}, 'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'},", "self.supported_time_grain_types = None class NetworkAccessControl(msrest.serialization.Model): \"\"\"Network access control for video", "will be automatically deleted from your account. :vartype expiration: ~datetime.datetime", "= kwargs.get('id', None) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time',", "'properties.flags', 'type': 'VideoFlags'}, 'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'}, 'media_info': {'key':", "sku: Describes the properties of a SKU. 
:type sku: ~video_analyzer.models.Sku", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} } def __init__(", "super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id', None) self.start_time", "str \"\"\" _validation = { 'type': {'required': True}, 'status': {'readonly':", "a set of authentication keys which will be auto-rotated as", "which availability needs to be checked. :type name: str :param", "datetime ranges as a string. You probably want to use", "\"\"\"The error detail. Variables are only populated by the server,", "sub-classes are: VideoSink. All required parameters must be populated in", "consumption group. :type consumption: ~video_analyzer.models.GroupLevelAccessControl \"\"\" _attribute_map = { 'integration':", "if this is set to P30D (30 days), content older", "Analyzer account is (optionally) encrypted. Variables are only populated by", "{ 'segment_length': {'key': 'segmentLength', 'type': 'str'}, } def __init__( self,", "= { 'type': {'required': True}, 'kid': {'required': True}, 'alg': {'required':", "self, **kwargs ): super(VideoEncoderBase, self).__init__(**kwargs) self.type = None # type:", "validation properties for tokens generated with Elliptical Curve algorithm. All", "'type': '[str]'}, 'claims': {'key': 'claims', 'type': '[TokenClaim]'}, 'keys': {'key': 'keys',", "name. :vartype name: str :ivar display_name: The diagnostic log category", "self.id = kwargs['id'] self.identity = kwargs['identity'] self.status = None class", "True}, 'metric_specifications': {'readonly': True}, } _attribute_map = { 'log_specifications': {'key':", "it automatically to try and match the quality of the", "instances which share the same processing characteristics. For instance, a", "'transport': {'key': 'transport', 'type': 'str'}, 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},", "a request. :ivar client_id: The client ID. 
:vartype client_id: str", "the video content authorization token to expose a WebSocket tunneled", "self.status = kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerUpdate(msrest.serialization.Model):", "this value to be returned as part of the resource", "expiration date in ISO8601 format (eg. 2021-01-01T00:00:00Z). :vartype expiration_date: ~datetime.datetime", "\"\"\" _validation = { 'type': {'required': True}, 'username': {'required': True},", "EncoderCustomPreset(EncoderPresetBase): \"\"\"Describes a custom preset for encoding the input content", "'display_name': {'readonly': True}, 'blob_duration': {'readonly': True}, } _attribute_map = {", "Known sub-classes are: EncoderProcessor. All required parameters must be populated", "or ~video_analyzer.models.CreatedByType :param last_modified_at: The timestamp of resource last modification", "created_at: The timestamp of resource creation (UTC). :type created_at: ~datetime.datetime", "'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerOperationStatus,", "the action type. Possible values include: \"Internal\". :type action_type: str", "streaming. Default is 'false'. If set to 'true', then \"disableRtspPublishing\"", "self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class EdgeModuleProvisioningToken(msrest.serialization.Model):", "kwargs.get('last_modified_by', None) self.last_modified_by_type = kwargs.get('last_modified_by_type', None) self.last_modified_at = kwargs.get('last_modified_at', None)", "self).__init__(**kwargs) self.type = None # type: Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model): \"\"\"The", "job operation. :vartype status: str :ivar error: The error details", "'alg': {'required': True}, 'n': {'required': True}, 'e': {'required': True}, }", "kwargs['alg'] self.n = kwargs['n'] self.e = kwargs['e'] class SourceNodeBase(NodeBase): \"\"\"Base", "the video content. 
:param download_url: Video file download URL. This", "{'required': True}, 'status': {'readonly': True}, } _attribute_map = { 'type':", "to storage. Smaller segments provide lower archive playback latency but", "~video_analyzer.models.Kind :param sku: Required. Describes the properties of a SKU.", "} def __init__( self, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error =", "tokens generated with RSA algorithm. All required parameters must be", "'userAssignedIdentity', 'type': 'str'}, } def __init__( self, **kwargs ): super(ResourceIdentity,", "client_id: The client ID. :vartype client_id: str :ivar principal_id: The", "Note: if downstream of RTSP source, and if disableArchive is", "= kwargs.get('media_info', None) self.archival = kwargs.get('archival', None) class VideoEntityCollection(msrest.serialization.Model): \"\"\"A", "{'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'} } def __init__( self, **kwargs ): super(VideoEncoderBase, self).__init__(**kwargs)", "= kwargs['location'] class UnsecuredEndpoint(EndpointBase): \"\"\"Unsecured endpoint describes an endpoint that", "Azure. :param user_assigned_identity: Required. The user assigned managed identity's resource", "parameters that can be references across the topology nodes. *", "kwargs.get('next_link', None) class LivePipelineOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status of", "'str'}, } def __init__( self, **kwargs ): super(StorageAccount, self).__init__(**kwargs) self.id", ":type node_name: str \"\"\" _validation = { 'node_name': {'required': True},", "of a SKU. :type sku: ~video_analyzer.models.Sku :param description: An optional", "provider: str :param resource: Resource on which the operation is", "days, up to a maximum of 10 years. 
For example,", "any \"\"\" _validation = { 'type': {'readonly': True}, 'info': {'readonly':", "{'key': 'nodeName', 'type': 'str'}, } def __init__( self, **kwargs ):", "str :param archive_base_url: Video archive streaming base URL. The archived", "state of the pipeline (read-only). Possible values include: \"Processing\", \"Canceled\",", "None) class PipelineJobOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status of an", "'principal_id': {'readonly': True}, } _attribute_map = { 'client_id': {'key': 'clientId',", "def __init__( self, **kwargs ): super(SinkNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase'", "a request. :ivar service_specification: The service specifications. :vartype service_specification: ~video_analyzer.models.ServiceSpecification", "self.width = kwargs.get('width', None) self.mode = kwargs.get('mode', None) class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase):", ":ivar content_urls: Set of URLs to the video content. :vartype", "= kwargs.get('storage_accounts', None) self.endpoints = None self.encryption = kwargs.get('encryption', None)", "{'key': 'status', 'type': 'str'}, } def __init__( self, **kwargs ):", "super(OperationDisplay, self).__init__(**kwargs) self.provider = kwargs.get('provider', None) self.resource = kwargs.get('resource', None)", "a pipeline job. Videos ingested through live pipelines can be", "(AVC) codec. 
All required parameters must be populated in order", "} def __init__( self, **kwargs ): super(VideoEncoderH264, self).__init__(**kwargs) self.type =", "'type': 'str'}, 'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'}, 'video_publishing_options': {'key': 'videoPublishingOptions',", "\"\"\" _validation = { 'type': {'required': True}, 'ranges': {'required': True},", "_validation = { 'id': {'readonly': True}, } _attribute_map = {", "**kwargs ): super(EncoderSystemPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str", "indicating whether or not there has ever been data recorded", "The error message. :type message: str \"\"\" _attribute_map = {", "storage account. Variables are only populated by the server, and", "players. Exported videos can be downloaded as MP4 files. Variables", "'large': {'key': 'large', 'type': 'str'}, } def __init__( self, **kwargs", "self).__init__(**kwargs) self.name = None self.status = None self.error = None", "pipeline job operation. :vartype status: str :ivar error: The error", "a request. :ivar name: The name of the pipeline job", "IoT Edge module twin properties. :vartype token: str \"\"\" _validation", "\"\"\"Video source allows for content from a Video Analyzer video", "the resolution of the encoded video. If omitted, the encoder", "'type': 'str'}, 'blob_duration': {'key': 'blobDuration', 'type': 'str'}, } def __init__(", "live, low-latency feed is available from the source. :type rtsp_tunnel_url:", "are: AudioEncoderAac. All required parameters must be populated in order", "properties for JWT token validation. You probably want to use", "pipeline. 
The fact that is being referenced, doesn't necessarily indicate", "'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipelineUpdate, self).__init__(**kwargs)", "} def __init__( self, **kwargs ): super(EncoderProcessor, self).__init__(**kwargs) self.type =", "= '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str self.ranges = kwargs['ranges'] class VideoSink(SinkNodeBase):", "'iso-8601'}, } def __init__( self, **kwargs ): super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date", "of LivePipeline items. :param value: A collection of LivePipeline items.", "for topologies where \"kind\" is set to \"live\". :param disable_archive:", "None class PipelineJobUpdate(ProxyResource): \"\"\"Pipeline job represents a unique instance of", "'str'}, 'width': {'key': 'width', 'type': 'str'}, 'mode': {'key': 'mode', 'type':", "The properties of the key used to encrypt the account.", "properties can be defined through the use of user-defined parameters,", ":type e: str \"\"\" _validation = { 'type': {'required': True},", ":param last_modified_by: The identity that last modified the resource. :type", "None) class AccountEncryption(msrest.serialization.Model): \"\"\"Defines how the Video Analyzer account is", "parameters: list[~video_analyzer.models.ParameterDefinition] \"\"\" _validation = { 'id': {'readonly': True}, 'name':", ":type resource: str :param operation: The operation type. :type operation:", "type: str :param audio_encoder: Describes a custom preset for encoding", "{'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type':", "): super(OperationCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class OperationDisplay(msrest.serialization.Model): \"\"\"Operation", "str :param value: Required. Expected value of the claim to", "'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'description': {'key':", "sink nodes. 
You probably want to use the sub-classes and", "claims. All required parameters must be populated in order to", "generate larger volume of storage transactions. Larger segments reduce the", "# Licensed under the MIT License. See License.txt in the", "Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.frame_rate = kwargs.get('frame_rate', None) self.scale", "\"\"\"Live pipeline represents a unique instance of a live topology,", "self.display_name = None self.display_description = None self.unit = None self.aggregation_type", "kept in storage. It must be provided in the ISO8601", ":type archival: ~video_analyzer.models.VideoArchival \"\"\" _validation = { 'id': {'readonly': True},", "can_stream: Required. Value indicating whether or not the video can", "= { 'can_stream': {'required': True}, 'has_data': {'required': True}, 'is_in_use': {'required':", "'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, 'origin': {'key': 'origin',", "flags: ~video_analyzer.models.VideoFlags :ivar content_urls: Set of URLs to the video", "omitted, the bitrate of the input audio is used. :type", "Video segment length indicates the length of individual video files", "Topology parameters with a default value can be optionally be", "vary between 1 day to 10 years, in 1 day", "pipeline topology definition. :type topology_name: str :param description: An optional", "integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public network access for ingestion group.", "account must be a Standard Storage account (either Microsoft.ClassicStorage or", "str or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types:", "**kwargs ): super(CredentialsBase, self).__init__(**kwargs) self.type = None # type: Optional[str]", "The timestamp of resource last modification (UTC). 
:type last_modified_at: ~datetime.datetime", "kwargs.get('description', None) self.type_properties_type = None self.flags = None self.content_urls =", "identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for this resource.", "{'key': 'value', 'type': '[VideoEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, }", "the validation of TLS endpoints. :param ignore_hostname: When set to", "~video_analyzer.models.ResourceIdentity :ivar status: The current status of the storage account", "edge module through the Azure IoT Edge module twin properties.", "key modulus. :type n: str :param e: Required. RSA public", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps':", "= { 'name': {'key': 'name', 'type': 'str'}, 'status': {'key': 'status',", "= kwargs['name'] self.value = kwargs['value'] class TrackedResource(Resource): \"\"\"The resource model", "MDM account is enabled. :vartype enable_regional_mdm_account: bool :ivar source_mdm_account: The", "str \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':", "automatically played by the Azure Video Analyzer player widget. Alternatively,", "or Microsoft.Storage). :type id: str :param identity: A managed identity", "class CredentialsBase(msrest.serialization.Model): \"\"\"Base class for credential objects. You probably want", "the state of the connection between service consumer and provider.", ":param parameters: List of the instance level parameter values for", "the user assigned managed identity used by the Video Analyzer", "): super(VideoScale, self).__init__(**kwargs) self.height = kwargs.get('height', None) self.width = kwargs.get('width',", ":type segment_length: str \"\"\" _attribute_map = { 'segment_length': {'key': 'segmentLength',", ":vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters: List of the", "is to be applied across all the cameras. 
Individual instance", "= { 'name': {'key': 'name', 'type': 'str'}, 'id': {'key': 'id',", "'location': {'key': 'location', 'type': 'str'}, } def __init__( self, **kwargs", "at a sampling rate of 48 kHz). Allowed values are", "the video and audio content. :type media_info: ~video_analyzer.models.VideoMediaInfo :param archival:", "are only allowed for topologies where \"kind\" is set to", "IoT Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access:", "'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(ErrorResponse, self).__init__(**kwargs)", "used to encrypt Video Analyzer account, including the key version.", "{ 'type': {'key': '@type', 'type': 'str'}, 'audio_encoder': {'key': 'audioEncoder', 'type':", ":param name: Required. Node name. Must be unique within the", "str \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[VideoEntity]'},", "'name', 'type': 'str'}, } def __init__( self, **kwargs ): super(EncoderSystemPreset,", "'str'}, 'principal_id': {'key': 'principalId', 'type': 'str'}, } def __init__( self,", "True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map =", "audio with the AAC codec. All required parameters must be", "# Code generated by Microsoft (R) AutoRest Code Generator. #", "class RtspSource(SourceNodeBase): \"\"\"RTSP source allows for media from an RTSP", "ever been data recorded or uploaded into the video. Newly", "to be used if the pipeline does not specify a", "able to periodically connect to the cloud. A new provisioning", "directly. Known sub-classes are: EncoderProcessor. All required parameters must be", ":param title: Optional video title provided by the user. 
Value", "list[~video_analyzer.models.NodeInput] \"\"\" _validation = { 'type': {'required': True}, 'name': {'required':", "kwargs.get('media_info', None) self.archival = kwargs.get('archival', None) class VideoEntityCollection(msrest.serialization.Model): \"\"\"A collection", "The status of the pipeline job operation. :vartype status: str", "'target': {'key': 'target', 'type': 'str'}, 'details': {'key': 'details', 'type': '[ErrorDetail]'},", ":param value: A collection of VideoEntity items. :type value: list[~video_analyzer.models.VideoEntity]", "log_specifications: List of log specifications. :vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications:", "None) class VideoScale(msrest.serialization.Model): \"\"\"The video scaling information. :param height: The", "{ 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} } def __init__( self,", "'type': 'GroupLevelAccessControl'}, 'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'}, } def __init__(", "sending a request. :ivar log_specifications: List of log specifications. :vartype", "'properties.sources', 'type': '[SourceNodeBase]'}, 'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key':", "RSA public key modulus. :type n: str :param e: Required.", "str or ~video_analyzer.models.ParameterType :param description: Description of the parameter. :type", ":vartype system_data: ~video_analyzer.models.SystemData :param kind: Topology kind. Possible values include:", "str self.inputs = kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase): \"\"\"Encoder processor allows for", ":param kid: Required. JWT token key id. Validation keys are", "\"Average\", \"Count\", \"Total\". 
:vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type:", "'created_by_type': {'key': 'createdByType', 'type': 'str'}, 'created_at': {'key': 'createdAt', 'type': 'iso-8601'},", "{'readonly': True}, 'details': {'readonly': True}, 'additional_info': {'readonly': True}, } _attribute_map", "Video content type. Different content types are suitable for different", "behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates: List of", ":type description: str :param segment_length: Segment length indicates the length", "VideoEncoderH264. All required parameters must be populated in order to", "modulus. :type n: str :param e: Required. RSA public key", "user defined parameters that can be references across the topology", ":type description: str :ivar type_properties_type: Video content type. Different content", "'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type':", "endpoint_url: str :param type: Required. The type of the endpoint.", ":param message: Detailed reason why the given name is available.", "kwargs['can_stream'] self.has_data = kwargs['has_data'] self.is_in_use = kwargs['is_in_use'] class VideoMediaInfo(msrest.serialization.Model): \"\"\"Contains", "'type': 'str'}, } def __init__( self, **kwargs ): super(LogSpecification, self).__init__(**kwargs)", "the pipeline (read-only). Possible values include: \"Inactive\", \"Activating\", \"Active\", \"Deactivating\".", "metric specifications. Variables are only populated by the server, and", "medium: Medium resolution preview image URL. :type medium: str :param", "class VideoAnalyzerOperationStatus(msrest.serialization.Model): \"\"\"Status of video analyzer operation. All required parameters", "be specified in ISO8601 duration format (i.e. \"P1D\" equals 1", "items. 
:type value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A link to the", "'origin', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'Properties'}, 'is_data_action': {'key':", "): super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name = None self.status = None self.error", "the video content URL as the value for the \"token\"", "when the video type is 'file' and video file is", "in increments of 100 Kbps. If the RTSP camera exceeds", "super(LivePipelineCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None)", "= kwargs.get('iot_hubs', None) self.public_network_access = kwargs.get('public_network_access', None) self.network_access_control = kwargs.get('network_access_control',", "server. :type type: str :param credentials: Required. Credentials to be", "is_in_use: Required. Value indicating whether or not the video is", "'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'title': {'key': 'properties.title',", "of private endpoint connection associated with the specified storage account.", "URL can be used in conjunction with the video content", "uploaded into the video. Newly created videos have this value", "'integration', 'type': 'GroupLevelAccessControl'}, 'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'}, 'consumption': {'key':", "'actions_required': {'key': 'actionsRequired', 'type': 'str'}, } def __init__( self, **kwargs", "account. :vartype source_mdm_account: str :ivar source_mdm_namespace: The source MDM namespace.", "Known sub-classes are: VideoSequenceAbsoluteTimeMarkers. All required parameters must be populated", "__init__( self, **kwargs ): super(VideoScale, self).__init__(**kwargs) self.height = kwargs.get('height', None)", "error details. 
:vartype details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The error additional", "bitrate_kbps: int :ivar state: Current state of the pipeline (read-only).", "{'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'description':", "same processing characteristics. For instance, a pipeline topology which captures", "'type': 'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityRequest, self).__init__(**kwargs)", "(either Microsoft.ClassicStorage or Microsoft.Storage). :type id: str :param identity: A", "transformed. :type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of the topology", "'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'kind': {'key': 'kind',", "'AudioEncoderBase'}, 'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'}, } def __init__( self,", "class AuthenticationBase(msrest.serialization.Model): \"\"\"Base class for access policies authentication methods. You", "are: JwtAuthentication. All required parameters must be populated in order", "can be optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] \"\"\" _validation", "include: \"Live_S1\", \"Batch_S1\". :type name: str or ~video_analyzer.models.SkuName :ivar tier:", "'type': 'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess',", "def __init__( self, **kwargs ): super(VideoEncoderH264, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264'", "None class AudioEncoderBase(msrest.serialization.Model): \"\"\"Base type for all audio encoder presets,", "new desired retention period will be effective within 24 hours.", "a request. :param tags: A set of tags. Resource tags.", "to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The", "downloaded as MP4 files. 
Variables are only populated by the", "to be stored or exported to other destinations. Variables are", "{'key': 'createdByType', 'type': 'str'}, 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, 'last_modified_by':", "sequences from existing captured video through a pipeline job. Videos", "self, **kwargs ): super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name = None self.status =", "the ISO8601 duration format in the granularity of days, up", "Options to change how the video sink publishes content via", "None class SystemData(msrest.serialization.Model): \"\"\"Metadata pertaining to creation and last modification", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "of key used to encrypt the Account Key. Possible values", "strict validation is used. :type validation_options: ~video_analyzer.models.TlsValidationOptions \"\"\" _validation =", "Private Endpoint Connection resource. Variables are only populated by the", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'} } def __init__( self,", "'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'}, } def", "'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'group_id': {'key':", "reset. Variables are only populated by the server, and will", "the Iot Hub mapping. :vartype status: str \"\"\" _validation =", "topology is composed of the following: * Parameters: list of", "type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\". 
:vartype type:", "Azure Video Analyzer IoT edge module through the Azure IoT", "self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str self.iot_hub_name = kwargs['iot_hub_name'] self.device_id", "= kwargs.get('origin', None) self.properties = kwargs.get('properties', None) self.is_data_action = kwargs.get('is_data_action',", "values include: \"Live\", \"Batch\". :type kind: str or ~video_analyzer.models.Kind :param", "'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, 'expiration':", "disable_rtsp_publishing: str \"\"\" _attribute_map = { 'disable_archive': {'key': 'disableArchive', 'type':", "Resource tags. :type tags: dict[str, str] :param identity: The identities", "} def __init__( self, **kwargs ): super(IotHub, self).__init__(**kwargs) self.id =", "has ever been data recorded or uploaded into the video.", "self.segment_length = kwargs.get('segment_length', None) class VideoPreviewImageUrls(msrest.serialization.Model): \"\"\"Video preview image URLs.", "Parameters can have optional default values and can later be", "'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'}, } def __init__( self, **kwargs", "user_assigned_identities: The User Assigned Managed Identities. :type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity]", "Video Analyzer to connect to. :type url: str :param tunnel:", "EdgeModuleEntity(ProxyResource): \"\"\"The representation of an edge module. Variables are only", "not this class directly. Known sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase.", "class LivePipelineUpdate(ProxyResource): \"\"\"Live pipeline represents a unique instance of a", "'type': 'str'}, 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, 'lock_aggregation_type': {'key': 'lockAggregationType',", "endpoint connections. 
:type value: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _attribute_map = { 'value':", "required_members: list[str] :param required_zone_names: The private link resource Private link", "'type': 'str'}, 'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes',", "PipelineTopologyUpdate(ProxyResource): \"\"\"Pipeline topology describes the processing steps to be applied", "pipeline (read-only). Possible values include: \"Inactive\", \"Activating\", \"Active\", \"Deactivating\". :vartype", "to be checked. :type name: str :param type: The resource", ":param sku: Required. Describes the properties of a SKU. :type", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference", "this node. :type inputs: list[~video_analyzer.models.NodeInput] :param video_name: Required. Name of", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'kid': {'key':", "# type: Optional[str] self.credentials = kwargs['credentials'] self.url = kwargs['url'] self.tunnel", "'[LivePipeline]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self,", "instance of a live topology, used for real-time ingestion, archiving", "will be ignored when sending a request. :ivar service_specification: The", "as a secret string in order to prevent this value", "up to a maximum of 10 years. For example, if", "time_sequences: ~video_analyzer.models.TimeSequenceBase \"\"\" _validation = { 'type': {'required': True}, 'name':", "= { 'type': {'required': True}, 'status': {'readonly': True}, } _attribute_map", "name: str :ivar display_name: The diagnostic log category display name.", "dimensions: The metric dimensions. 
:vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates", "archiving and publishing of content for a unique RTSP camera.", "} def __init__( self, **kwargs ): super(PipelineJobCollection, self).__init__(**kwargs) self.value =", "'serviceSpecification', 'type': 'ServiceSpecification'}, } def __init__( self, **kwargs ): super(Properties,", "'mode', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoScale,", "a video of type 'archive'. If used in a batch", "or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The properties of the key used", "str \"\"\" _attribute_map = { 'name': {'key': 'name', 'type': 'str'},", "which are intended to be kept in storage. It must", "created_by_type: The type of identity that created the resource. Possible", "Elliptical Curve algorithm. All required parameters must be populated in", "def __init__( self, **kwargs ): super(VideoPreviewImageUrls, self).__init__(**kwargs) self.small = kwargs.get('small',", "= None class PipelineJobUpdate(ProxyResource): \"\"\"Pipeline job represents a unique instance", "ISO8601 duration format (i.e. \"P1D\" equals 1 day) and can", "{'key': 'tunnel', 'type': 'TunnelBase'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint':", "bool \"\"\" _validation = { 'can_stream': {'required': True}, 'has_data': {'required':", "self.type = None # type: Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase): \"\"\"A remote", ":param download_url: Video file download URL. This URL can be", "Validation options to use when authenticating a TLS connection. By", "endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties. 
:type encryption:", "str \"\"\" _validation = { 'id': {'readonly': True}, } _attribute_map", "'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self,", "self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None) class VideoScale(msrest.serialization.Model): \"\"\"The video scaling information.", "bitrate is now below the reserved capacity. Doing so will", "'[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'}, } def __init__( self,", "populated in order to send to Azure. :param expiration_date: Required.", "HLS CMAF: /manifest(format=m3u8-cmaf) - DASH CMAF: /manifest(format=mpd-time-cmaf) Moreover, an ongoing", "'type': 'str'}, } def __init__( self, **kwargs ): super(VideoContentToken, self).__init__(**kwargs)", "for requests in each blob. :vartype blob_duration: str \"\"\" _validation", "list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly':", "in order to send to Azure. :param endpoint_url: The URL", "without a default value must be defined. Topology parameters with", "it matches at least one of the given values. :type", "The identity that last modified the resource. :type last_modified_by: str", "added to the video content URL as the value for", "'false'. If set to 'true', then \"disableRtspPublishing\" must be set", "values include: \"Live_S1\", \"Batch_S1\". :type name: str or ~video_analyzer.models.SkuName :ivar", "kwargs['location'] class UnsecuredEndpoint(EndpointBase): \"\"\"Unsecured endpoint describes an endpoint that the", "'previewImageUrls', 'type': 'VideoPreviewImageUrls'}, } def __init__( self, **kwargs ): super(VideoContentUrls,", "{'key': 'medium', 'type': 'str'}, 'large': {'key': 'large', 'type': 'str'}, }", "str] :param location: Required. The geo-location where the resource lives.", "Value can be up to 256 characters long. 
:type title:", "video_publishing_options: ~video_analyzer.models.VideoPublishingOptions \"\"\" _validation = { 'type': {'required': True}, 'name':", "'type': 'str'}, 'device_id': {'key': 'deviceId', 'type': 'str'}, } def __init__(", "defined parameters that can be references across the topology nodes.", "be gated on events or camera may not be accessible", "case the module state lost or reset. Variables are only", "'value': {'key': 'value', 'type': '[PipelineTopology]'}, 'next_link': {'key': '@nextLink', 'type': 'str'},", "= None self.parameters = kwargs.get('parameters', None) class LivePipelineCollection(msrest.serialization.Model): \"\"\"A collection", "} def __init__( self, **kwargs ): super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications =", "TLS connection. A null list designates that Azure Video Analyzer's", "name: Required. The SKU name. Possible values include: \"Live_S1\", \"Batch_S1\".", "High resolution preview image URL. :type large: str \"\"\" _attribute_map", "_validation = { 'name': {'required': True}, 'type': {'required': True}, }", "to capture and publish content. Note: if downstream of RTSP", "topology. Topology parameters without a default value must be defined.", "model definition for an Azure Resource Manager tracked top level", "# type: str class UserAssignedManagedIdentity(msrest.serialization.Model): \"\"\"The details of the user", "audio at a sampling rate of 48 kHz). Allowed values", "{'key': 'systemData', 'type': 'SystemData'}, 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state':", "the resource. 
:vartype name: str :ivar type: The type of", "JwtAuthentication(AuthenticationBase): \"\"\"Properties for access validation based on JSON Web Tokens", "'#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str class RtspSource(SourceNodeBase): \"\"\"RTSP source allows for", "value: list[~video_analyzer.models.Operation] \"\"\" _attribute_map = { 'value': {'key': 'value', 'type':", "ignored when sending a request. :ivar name: The name of", "image from the video archive in different resolutions. They are", "connect to the cloud. A new provisioning token can be", "'@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key':", ":type type: str \"\"\" _validation = { 'type': {'required': True},", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'audio_encoder':", "'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } _subtype_map = {", "str :param audio_encoder: Describes a custom preset for encoding audio.", "_validation = { 'type': {'required': True}, 'credentials': {'required': True}, 'url':", ":param small: Low resolution preview image URL. :type small: str", "'str'}, 'large': {'key': 'large', 'type': 'str'}, } def __init__( self,", "{'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type': 'str'}, }", "'inputs': {'required': True}, 'video_name': {'required': True}, } _attribute_map = {", "True}, 'status': {'readonly': True}, } _attribute_map = { 'type': {'key':", "parameter. See pipeline topology parameters for more information. All required", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} } def", "token expiration date in ISO8601 format (eg. 2021-01-01T00:00:00Z). :vartype expiration_date:", "kwargs.get('rtsp_tunnel_url', None) self.preview_image_urls = kwargs.get('preview_image_urls', None) class VideoCreationProperties(msrest.serialization.Model): \"\"\"Optional properties", "video. 
:type scale: ~video_analyzer.models.VideoScale \"\"\" _validation = { 'type': {'required':", "_validation = { 'log_specifications': {'readonly': True}, 'metric_specifications': {'readonly': True}, }", "None self.display_name = None self.display_description = None self.unit = None", "to be used: ES256, ES384 or ES512. Possible values include:", "be used if the pipeline does not specify a value.", "= { 'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'}, 'ingestion': {'key': 'ingestion',", "~video_analyzer.models.SystemData :ivar group_id: The private link resource group id. :vartype", "'retentionPeriod', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoArchival,", "video_publishing_options: Options to change how the video sink publishes content", "for video analyzer account. :param integration: Public network access for", "the IoT Hub. :type iot_hub_name: str :param device_id: Required. The", "'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__(", "'group_id': {'key': 'properties.groupId', 'type': 'str'}, 'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},", "self, **kwargs ): super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date = kwargs['expiration_date'] class LivePipeline(ProxyResource):", "of the token signing key. Token signature must match exactly", "): super(AuthenticationBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class", "the operation applies to data-plane. :type is_data_action: bool :param action_type:", "= kwargs['video_name'] self.video_creation_properties = kwargs.get('video_creation_properties', None) self.video_publishing_options = kwargs.get('video_publishing_options', None)", "= kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class PipelineTopologyUpdate(ProxyResource): \"\"\"Pipeline", ":param issuers: List of expected token issuers. 
Token issuer is", "type: str or ~video_analyzer.models.VideoAnalyzerEndpointType \"\"\" _validation = { 'type': {'required':", "control for Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name':", "type: Optional[str] class CertificateSource(msrest.serialization.Model): \"\"\"Base class for certificate sources. You", "str :param description: The operation description. :type description: str \"\"\"", "__init__( self, **kwargs ): super(VideoEntity, self).__init__(**kwargs) self.title = kwargs.get('title', None)", "_validation = { 'name': {'required': True}, 'tier': {'readonly': True}, }", "__init__( self, **kwargs ): super(MetricDimension, self).__init__(**kwargs) self.name = None self.display_name", "log category display name. :vartype display_name: str :ivar blob_duration: The", "on the current video state. :vartype flags: ~video_analyzer.models.VideoFlags :ivar content_urls:", "allowed for specified resources under the Video Analyzer account. Possible", "collection of VideoAnalyzer items. :type value: list[~video_analyzer.models.VideoAnalyzer] \"\"\" _attribute_map =", "str class AuthenticationBase(msrest.serialization.Model): \"\"\"Base class for access policies authentication methods.", "be populated in order to send to Azure. :param node_name:", "will be periodically deleted. This value can be updated at", "status: str \"\"\" _validation = { 'id': {'required': True}, 'identity':", "as the source. :type video_name: str :param time_sequences: Required. Describes", "to download the most recent still image from the video", "None) self.endpoints = None self.encryption = kwargs.get('encryption', None) self.iot_hubs =", "account. 
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'id': {'readonly':", "{'key': 'aggregationType', 'type': 'str'}, 'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types':", "'tags' and a 'location'. Variables are only populated by the", "system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an existing pipeline topology.", "of an edge module. Variables are only populated by the", "): super(NetworkAccessControl, self).__init__(**kwargs) self.integration = kwargs.get('integration', None) self.ingestion = kwargs.get('ingestion',", "'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key':", "self.next_link = kwargs.get('next_link', None) class LivePipelineOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the", "def __init__( self, **kwargs ): super(JwtAuthentication, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication'", "{ 'endpoint_url': {'key': 'endpointUrl', 'type': 'str'}, 'type': {'key': 'type', 'type':", "resource. :vartype name: str :ivar type: The type of the", "**kwargs ): super(JwtAuthentication, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str", "{ 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} } def __init__( self, **kwargs ):", "resource on API requests. :type password: str \"\"\" _validation =", "'type': 'str'}, } def __init__( self, **kwargs ): super(ParameterDefinition, self).__init__(**kwargs)", "initial handshake between IoT edge module and the cloud. After", "external data to be ingested by the pipeline. :type sources:", "\"Live_S1\", \"Batch_S1\". :type name: str or ~video_analyzer.models.SkuName :ivar tier: The", "each blob. 
:vartype blob_duration: str \"\"\" _validation = { 'name':", "self, **kwargs ): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status = kwargs.get('status', None) self.description", "class CheckNameAvailabilityResponse(msrest.serialization.Model): \"\"\"The check availability result. :param name_available: Indicates if", "value: str \"\"\" _validation = { 'name': {'required': True}, }", "allows for media from an RTSP camera or generic RTSP", "\"Deleting\", \"Failed\". :vartype provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState \"\"\" _validation =", "of the collection (when the collection contains too many results", "'description', 'type': 'str'}, 'default': {'key': 'default', 'type': 'str'}, } def", "storage account mapping. :vartype status: str \"\"\" _validation = {", ":vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The metric lock", "= kwargs.get('tags', None) self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts',", "no content is archived. :type video_name: str :param video_creation_properties: Optional", "_validation = { 'user_assigned_identity': {'required': True}, } _attribute_map = {", "{'key': 'kid', 'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'n':", "'type': '[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'}, 'processors': {'key': 'properties.processors',", ":param properties: Operation properties format. :type properties: ~video_analyzer.models.Properties :param is_data_action:", "can be used in conjunction with the video content authorization", ":param value: A collection of Operation items. 
:type value: list[~video_analyzer.models.Operation]", "'archive' and a live, low-latency feed is available from the", "'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateEndpoint, self).__init__(**kwargs)", "~video_analyzer.models.SystemData :param role: Defines the access level granted by this", "True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map = { 'id': {'key':", "): super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind = kwargs.get('kind', None) self.sku = kwargs.get('sku',", "'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} } def __init__( self, **kwargs ): super(EncoderPresetBase,", "{'readonly': True}, } _attribute_map = { 'client_id': {'key': 'clientId', 'type':", "str :ivar message: The error message. :vartype message: str :ivar", "__init__( self, **kwargs ): super(EncoderProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' #", "this pipeline job will be automatically deleted from your account.", "defined in individual instances of the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration]", "True}, 'system_data': {'readonly': True}, 'kind': {'required': True}, 'sku': {'required': True},", "_validation = { 'type': {'required': True}, 'ranges': {'required': True}, }", "audio_encoder: Describes a custom preset for encoding audio. :type audio_encoder:", "list of one or more data sources nodes such as", "Optional[str] class CertificateSource(msrest.serialization.Model): \"\"\"Base class for certificate sources. 
You probably", "want to use the sub-classes and not this class directly.", "array of upstream node references within the topology to be", "{'required': True}, 'current_key_identifier': {'readonly': True}, } _attribute_map = { 'key_identifier':", "'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel',", "PrivateLinkResourceListResult(msrest.serialization.Model): \"\"\"A list of private link resources. :param value: Array", "input content. For example, it can used to change the", "= { 'small': {'key': 'small', 'type': 'str'}, 'medium': {'key': 'medium',", "'str'}, 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, 'last_modified_at': {'key': 'lastModifiedAt', 'type':", "Overall a topology is composed of the following: * Parameters:", "will be ignored when sending a request. :ivar client_id: The", "Video Analyzer account. :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = {", "the Key Vault mapping. :vartype status: str \"\"\" _validation =", "day to 10 years, in 1 day increments. When absent", "validation. You probably want to use the sub-classes and not", "error detail. Variables are only populated by the server, and", "= kwargs.get('name', None) self.type = kwargs.get('type', None) class CheckNameAvailabilityResponse(msrest.serialization.Model): \"\"\"The", "'str'}, } def __init__( self, **kwargs ): super(PipelineJobError, self).__init__(**kwargs) self.code", "resource of private end point. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state:", "self.log_specifications = None self.metric_specifications = None class SinkNodeBase(NodeBase): \"\"\"Base class", "= kwargs['name'] class ProcessorNodeBase(NodeBase): \"\"\"Base class for topology processor nodes.", "Required. The type of the endpoint. Possible values include: \"ClientApi\".", "current node. 
:type node_name: str \"\"\" _validation = { 'node_name':", "'node_name': {'key': 'nodeName', 'type': 'str'}, } def __init__( self, **kwargs", "__init__( self, **kwargs ): super(NodeInput, self).__init__(**kwargs) self.node_name = kwargs['node_name'] class", "managed identity for the Video Analyzer resource. All required parameters", "} def __init__( self, **kwargs ): super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value =", "None) self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters', None) self.sources", "periodically deleted. This value can be updated at any time", "lower archive playback latency but generate larger volume of storage", "'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type':", "'kid', 'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'x': {'key':", "= kwargs.get('segment_length', None) self.retention_period = kwargs.get('retention_period', None) class VideoEncoderBase(msrest.serialization.Model): \"\"\"Base", "str :ivar blob_duration: The time range for requests in each", "__init__( self, **kwargs ): super(ProxyResource, self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource): \"\"\"Access policies", "kwargs['y'] class EdgeModuleEntity(ProxyResource): \"\"\"The representation of an edge module. Variables", "a pipeline node. All required parameters must be populated in", "kind: str or ~video_analyzer.models.Kind :param sku: Required. Describes the properties", "here. :type description: str :param parameters: List of the topology", ":param id: Operation resource ID. :type id: str :param start_time:", "{'required': True}, 'name': {'required': True}, } _attribute_map = { 'type':", "rights reserved. # Licensed under the MIT License. 
See License.txt", "'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'}, 'state': {'key': 'properties.state',", "- HLS CMAF: /manifest(format=m3u8-cmaf) - DASH CMAF: /manifest(format=mpd-time-cmaf) Moreover, an", "= { 'node_name': {'key': 'nodeName', 'type': 'str'}, } def __init__(", "private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'endpoints': {'readonly': True}, 'provisioning_state':", "dimension. Variables are only populated by the server, and will", "account. The key may either be versioned (for example https://vault/keys/mykey/version1)", "{ 'name': {'readonly': True}, 'status': {'readonly': True}, 'error': {'readonly': True},", "self.public_network_access = kwargs.get('public_network_access', None) self.network_access_control = kwargs.get('network_access_control', None) self.provisioning_state =", "type. Different content types are suitable for different applications and", "'value', 'type': '[AccessPolicyEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The", "state: Current state of the pipeline (read-only). Possible values include:", "'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},", "None) class VideoPreviewImageUrls(msrest.serialization.Model): \"\"\"Video preview image URLs. These URLs can", "within 24 hours. :type retention_period: str \"\"\" _attribute_map = {", "is_in_use: bool \"\"\" _validation = { 'can_stream': {'required': True}, 'has_data':", "are interleaved on the TCP RTSP connection. When using HTTP,", "\"\"\"Base class for nodes. You probably want to use the", "live pipelines can be streamed through Azure Video Analyzer Player", "timestamp of resource creation (UTC). 
:type created_at: ~datetime.datetime :param last_modified_by:", "VideoAnalyzerIdentity(msrest.serialization.Model): \"\"\"The managed identity for the Video Analyzer resource. All", "EncoderPresetBase(msrest.serialization.Model): \"\"\"Base type for all encoder presets, which define the", "'properties', 'type': 'Properties'}, 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, 'action_type': {'key':", "that is being referenced, doesn't necessarily indicate that data is", "a request. :ivar id: The ARM identifier for Private Endpoint.", "{'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, 'target':", "\"\"\"The managed identity for the Video Analyzer resource. All required", "as the value for the \"token\" query string parameter. The", "of AccessPolicyEntity items. :param value: A collection of AccessPolicyEntity items.", "this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not", "'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'},", ":type kid: str \"\"\" _validation = { 'type': {'required': True},", "information. :vartype system_data: ~video_analyzer.models.SystemData :param title: Optional video title provided", "kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model): \"\"\"A collection of Operation items. :param", ":param type: Required. The discriminator for derived types.Constant filled by", "value can be optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] \"\"\"", "Possible values include: \"SystemKey\", \"CustomerKey\". 
:type type: str or ~video_analyzer.models.AccountEncryptionKeyType", "'type': 'str'}, } def __init__( self, **kwargs ): super(KeyVaultProperties, self).__init__(**kwargs)", "default value for the parameter to be used if the", "kwargs.get('message', None) class CredentialsBase(msrest.serialization.Model): \"\"\"Base class for credential objects. You", "{'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status':", ":type archive_base_url: str :param rtsp_tunnel_url: Video low-latency streaming URL. The", "kwargs.get('video_encoder', None) class NodeBase(msrest.serialization.Model): \"\"\"Base class for nodes. You probably", ":type properties: ~video_analyzer.models.Properties :param is_data_action: Whether the operation applies to", "class PipelineJobCollection(msrest.serialization.Model): \"\"\"A collection of PipelineJob items. :param value: A", "Describes a sequence of datetime ranges. The video source only", "Name of the built-in encoding preset. Possible values include: \"SingleLayer_540p_H264_AAC\",", "None) self.dimensions = None self.enable_regional_mdm_account = None self.source_mdm_account = None", "{'key': 'iotHubName', 'type': 'str'}, 'device_id': {'key': 'deviceId', 'type': 'str'}, }", "name. :vartype display_name: str :ivar display_description: The metric display description.", "current_key_identifier: The current key used to encrypt Video Analyzer account,", "'Stretch' then both width and height must be specified. Else", ":vartype tier: str or ~video_analyzer.models.SkuTier \"\"\" _validation = { 'name':", "'location', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key':", "the base URL: .. code-block:: - HLSv4: /manifest(format=m3u8-aapl).m3u8 - HLS", "include: \"Inactive\", \"Activating\", \"Active\", \"Deactivating\". 
:vartype state: str or ~video_analyzer.models.LivePipelineState", "super(ProcessorNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str self.inputs =", ":param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the", "collection of AccessPolicyEntity items. :type value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A", "self.origin = kwargs.get('origin', None) self.properties = kwargs.get('properties', None) self.is_data_action =", "Resource Manager proxy resource. It will not have tags and", "error: The error object. :type error: ~video_analyzer.models.ErrorDetail \"\"\" _attribute_map =", "'principalId', 'type': 'str'}, } def __init__( self, **kwargs ): super(UserAssignedManagedIdentity,", "'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, 'origin': {'key':", "str self.name = kwargs['name'] class Endpoint(msrest.serialization.Model): \"\"\"The endpoint details. All", ":ivar status: The current status of the storage account mapping.", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} }", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar group_id: The private link", "): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available = kwargs.get('name_available', None) self.reason = kwargs.get('reason',", "'bool'}, 'has_data': {'key': 'hasData', 'type': 'bool'}, 'is_in_use': {'key': 'isInUse', 'type':", "= { 'id': {'key': 'id', 'type': 'str'}, } def __init__(", "that Video Analyzer will use to access the storage account.", "class OperationCollection(msrest.serialization.Model): \"\"\"A collection of Operation items. :param value: A", "): super(StorageAccount, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs.get('identity', None)", "\"File\". 
:vartype type_properties_type: str or ~video_analyzer.models.VideoType :ivar flags: Video flags", "'medium', 'type': 'str'}, 'large': {'key': 'large', 'type': 'str'}, } def", "super(SinkNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str self.inputs =", "kwargs.get('created_by', None) self.created_by_type = kwargs.get('created_by_type', None) self.created_at = kwargs.get('created_at', None)", "order to send to Azure. :ivar id: Fully qualified resource", "str :param value: Parameter value to be applied on this", "{ 'node_name': {'key': 'nodeName', 'type': 'str'}, } def __init__( self,", "'type': 'str'}, 'issuers': {'key': 'issuers', 'type': '[str]'}, 'audiences': {'key': 'audiences',", "topology parameters. A pipeline can only define or override parameters", "kwargs.get('sources', None) self.processors = kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None)", "kwargs.get('claims', None) self.keys = kwargs.get('keys', None) class KeyVaultProperties(msrest.serialization.Model): \"\"\"The details", "List of additional token claims to be validated. Token must", "= None self.supported_time_grain_types = None class NetworkAccessControl(msrest.serialization.Model): \"\"\"Network access control", "input video. :type scale: ~video_analyzer.models.VideoScale \"\"\" _validation = { 'type':", "use of the topology to be described here. :type description:", "Possible values include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\". :type last_modified_by_type: str", "blob_duration: str \"\"\" _validation = { 'name': {'readonly': True}, 'display_name':", "str or ~video_analyzer.models.MetricUnit :ivar aggregation_type: The metric aggregation type. Possible", "request. :ivar code: The error code. :vartype code: str :ivar", ":type created_at: ~datetime.datetime :param last_modified_by: The identity that last modified", "error detail. 
:type error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name':", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A", "self.details = None self.additional_info = None class ErrorResponse(msrest.serialization.Model): \"\"\"Common error", "self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs['identity'] self.status = None", "below the reserved capacity. Doing so will ensure that one", "Default is 'false'. If set to 'true', then \"disableRtspPublishing\" must", "self, **kwargs ): super(VideoEncoderH264, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type:", "ignored when sending a request. :ivar name: The metric name.", "reduce the amount of storage transactions while increasing the archive", "module and the cloud. After the initial handshake, the IoT", "to send to Azure. :param name: Required. The SKU name.", "a video resource. If archiving is enabled, this results in", "for topology processor nodes. You probably want to use the", "'location'. Variables are only populated by the server, and will", "an active pipeline. The fact that is being referenced, doesn't", "retention period indicates how long the video is kept in", "{'key': 'properties.requiredMembers', 'type': '[str]'}, 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'}, }", "'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},", "{'readonly': True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type':", ":vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to an existing", "\"\"\" _validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True},", "group. 
:type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption: Public network access for", "be versioned (for example https://vault/keys/mykey/version1) or reference a key without", "this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated", "'description': {'key': 'description', 'type': 'str'}, 'segment_length': {'key': 'segmentLength', 'type': 'str'},", "that created the resource. :type created_by: str :param created_by_type: The", "{'readonly': True}, 'display_description': {'readonly': True}, 'unit': {'readonly': True}, 'aggregation_type': {'readonly':", "'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } def __init__( self, **kwargs", "'displayName', 'type': 'str'}, 'display_description': {'key': 'displayDescription', 'type': 'str'}, 'unit': {'key':", "'type': 'VideoPublishingOptions'}, } def __init__( self, **kwargs ): super(VideoSink, self).__init__(**kwargs)", "playback latency. Value must be specified in ISO8601 duration format", "Optional[str] self.name = kwargs['name'] class ProcessorNodeBase(NodeBase): \"\"\"Base class for topology", "{'readonly': True}, 'token': {'readonly': True}, } _attribute_map = { 'expiration_date':", "**kwargs ): super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier = kwargs['key_identifier'] self.current_key_identifier = None", "class PipelineJob(ProxyResource): \"\"\"Pipeline job represents a unique instance of a", "'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'retention_period': {'key': 'retentionPeriod', 'type': 'str'},", "'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, 'video_name':", "tokens generated with Elliptical Curve algorithm. All required parameters must", "True}, 'required_members': {'readonly': True}, } _attribute_map = { 'id': {'key':", "a TLS connection. By default, strict validation is used. 
:type", "__init__( self, **kwargs ): super(VideoEncoderH264, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' #", "\"\"\"A collection of LivePipeline items. :param value: A collection of", "name: str :ivar status: The status of the live pipeline", "storage. Value must be specified in ISO8601 duration format (i.e.", "the topology nodes. * Sources: list of one or more", "the registration token. The Azure Video Analyzer IoT edge module", "kwargs.get('status', None) self.description = kwargs.get('description', None) self.actions_required = kwargs.get('actions_required', None)", "PrivateLinkResource(Resource): \"\"\"A private link resource. Variables are only populated by", "sets it automatically to try and match the quality of", ":type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with this", "a location. Variables are only populated by the server, and", "created on the service. These will not take effect if", "'type': 'SystemData'}, } def __init__( self, **kwargs ): super(ProxyResource, self).__init__(**kwargs)", "parameters must be populated in order to send to Azure.", "None self.status = None self.error = None class LivePipelineUpdate(ProxyResource): \"\"\"Live", "'e': {'key': 'e', 'type': 'str'}, } def __init__( self, **kwargs", "~video_analyzer.models.SystemData :ivar edge_module_id: Internal ID generated for the instance of", "{ 'name_available': {'key': 'nameAvailable', 'type': 'bool'}, 'reason': {'key': 'reason', 'type':", "the video archive segments which are intended to be kept", "{'key': 'target', 'type': 'str'}, 'details': {'key': 'details', 'type': '[ErrorDetail]'}, 'additional_info':", "IoT edge module will agree on a set of authentication", "Optional[str] class TlsEndpoint(EndpointBase): \"\"\"TLS endpoint describes an endpoint that the", "resolution preview image URL. :type medium: str :param large: High", "be unique within the topology. 
:type name: str :param transport:", "rtsp_tunnel_url: str :param preview_image_urls: Video preview image URLs. These URLs", "None) self.sinks = kwargs.get('sinks', None) class PrivateEndpoint(msrest.serialization.Model): \"\"\"The Private Endpoint", "the Video Analyzer account. Possible values include: \"Failed\", \"InProgress\", \"Succeeded\".", "{'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type':", "} def __init__( self, **kwargs ): super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name =", "self.transport = kwargs.get('transport', None) self.endpoint = kwargs['endpoint'] class TunnelBase(msrest.serialization.Model): \"\"\"Base", "self.type = None self.info = None class ErrorDetail(msrest.serialization.Model): \"\"\"The error", "{'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, 'expiration': {'readonly':", "def __init__( self, **kwargs ): super(PrivateEndpoint, self).__init__(**kwargs) self.id = None", "name: str :param value: Required. Expected value of the claim", "'actionsRequired', 'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateLinkServiceConnectionState,", "\"ES384\", \"ES512\". :type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo :param x: Required.", "class ErrorDetail(msrest.serialization.Model): \"\"\"The error detail. Variables are only populated by", "send to Azure. :param can_stream: Required. Value indicating whether or", "= None self.source_mdm_namespace = None self.supported_time_grain_types = None class NetworkAccessControl(msrest.serialization.Model):", "in storage. Value must be specified in ISO8601 duration format", "List of expected token issuers. Token issuer is valid if", "handshake, the IoT edge module will agree on a set", "analysis or transformations. 
* Sinks: list of one or more", "kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class AccountEncryption(msrest.serialization.Model): \"\"\"Defines how", "when the topology is used only for low latency video", "keys: List of keys which can be used to validate", "'endTime', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key':", "'properties.archival', 'type': 'VideoArchival'}, } def __init__( self, **kwargs ): super(VideoEntity,", "token header. :type kid: str :param alg: Required. Elliptical curve", "'name': {'readonly': True}, 'display_name': {'readonly': True}, 'display_description': {'readonly': True}, 'unit':", "self.name = kwargs['name'] self.value = kwargs.get('value', None) class PemCertificateList(CertificateSource): \"\"\"A", "where the resource lives. :type location: str :param identity: The", "this class directly. Known sub-classes are: EccTokenKey, RsaTokenKey. All required", "of the pipeline job operation. :vartype status: str :ivar error:", "None class VideoArchival(msrest.serialization.Model): \"\"\"Video archival properties. :param retention_period: Video retention", "Video Analyzer IoT edge module must be initialized and connected", "kwargs['url'] self.tunnel = kwargs.get('tunnel', None) class ErrorAdditionalInfo(msrest.serialization.Model): \"\"\"The resource management", "to be used on a pipeline node. All required parameters", "principal_id: The principal ID. 
:vartype principal_id: str \"\"\" _validation =", "'SourceNodeBase'} } def __init__( self, **kwargs ): super(NodeBase, self).__init__(**kwargs) self.type", "'type': 'iso-8601'}, } def __init__( self, **kwargs ): super(ListProvisioningTokenInput, self).__init__(**kwargs)", "self.display = kwargs.get('display', None) self.origin = kwargs.get('origin', None) self.properties =", "= { 'type': {'required': True}, 'ranges': {'required': True}, } _attribute_map", ":type value: list[~video_analyzer.models.VideoEntity] :param next_link: A link to the next", "'token', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoContentToken,", "not this class directly. Known sub-classes are: VideoEncoderH264. All required", "'str'}, 'details': {'key': 'details', 'type': '[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo', 'type':", "**kwargs ): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available = kwargs.get('name_available', None) self.reason =", "or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The reason for approval/rejection of the", "to the Internet prior to the token expiration date. :type", "self.next_link = kwargs.get('next_link', None) class PipelineJobError(msrest.serialization.Model): \"\"\"Details about the error", "populated in order to send to Azure. :param endpoint_url: The", "request. :ivar expiration_date: The expiration date of the registration token.", "'name': {'key': 'name', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'},", "tables, queues, and blobs. 
The primary storage account must be", "example, video recording may be gated on events or camera", "'type': 'str'}, } def __init__( self, **kwargs ): super(AudioEncoderAac, self).__init__(**kwargs)", "at which audio should be encoded (2-channel stereo audio at", "Processors: list of nodes which perform data analysis or transformations.", "{'key': 'supportedTimeGrainTypes', 'type': '[str]'}, } def __init__( self, **kwargs ):", "None) self.type = kwargs['type'] class EndpointBase(msrest.serialization.Model): \"\"\"Base class for endpoints.", "date. :type expiration_date: ~datetime.datetime \"\"\" _validation = { 'expiration_date': {'required':", "the initial call to create the video resource can lead", ":type keys: list[~video_analyzer.models.TokenKey] \"\"\" _validation = { 'type': {'required': True},", "RTSP camera and archives the content can be reused across", "enable external data to be ingested by the pipeline. :type", "be set to 'false'. :type disable_archive: str :param disable_rtsp_publishing: When", "be set to 'false'. :type disable_rtsp_publishing: str \"\"\" _attribute_map =", "by Microsoft (R) AutoRest Code Generator. # Changes may cause", "str :ivar display_name: The diagnostic log category display name. :vartype", "information about the available video actions and its dynamic properties", "encrypt Video Analyzer account, including the key version. :vartype current_key_identifier:", "'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type':", "= None self.info = None class ErrorDetail(msrest.serialization.Model): \"\"\"The error detail.", "PipelineJobCollection(msrest.serialization.Model): \"\"\"A collection of PipelineJob items. 
:param value: A collection", "= { 'type': {'required': True}, } _attribute_map = { 'type':", "**kwargs ): super(VideoPublishingOptions, self).__init__(**kwargs) self.disable_archive = kwargs.get('disable_archive', None) self.disable_rtsp_publishing =", "resolution from 4K to 1280x720. All required parameters must be", ":vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List of metric specifications. :vartype", "Azure Resource Manager proxy resource. It will not have tags", "sub-classes and not this class directly. Known sub-classes are: VideoEncoderH264.", "Indicates if the resource name is available. :type name_available: bool", "and can later be defined in individual instances of the", "str \"\"\" _validation = { 'name': {'required': True}, } _attribute_map", "'type': '[Endpoint]'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs',", "name: str or ~video_analyzer.models.EncoderSystemPresetType \"\"\" _validation = { 'type': {'required':", "str :param identity: Required. The IoT Hub identity. :type identity:", "typically used when the endpoint is behind a firewall. :type", "the referenced topology. Topology parameters without a default value must", "~video_analyzer.models.ServiceSpecification \"\"\" _validation = { 'service_specification': {'readonly': True}, } _attribute_map", "indicates how long the video is kept in storage. Value", "None) self.provisioning_state = None self.private_endpoint_connections = None class VideoArchival(msrest.serialization.Model): \"\"\"Video", "existing pipeline topology. When activated, this pipeline job will process", "'name': {'readonly': True}, 'display_name': {'readonly': True}, 'blob_duration': {'readonly': True}, }", "self.additional_info = None class ErrorResponse(msrest.serialization.Model): \"\"\"Common error response for all", "remote tunnel securely established using IoT Hub device information. 
All", "outcome. The topology should be defined according to the scenario", "download_url: Video file download URL. This URL can be used", "__init__( self, **kwargs ): super(TimeSequenceBase, self).__init__(**kwargs) self.type = None #", "pipeline. :type description: str :ivar state: Current state of the", "properties. :vartype token: str \"\"\" _validation = { 'expiration_date': {'readonly':", "* Processors: list of nodes which perform data analysis or", "'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } _subtype_map =", "used. :type bitrate_kbps: str \"\"\" _validation = { 'type': {'required':", "self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str self.ranges = kwargs['ranges'] class", "str or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The properties of the key", "str :param inputs: Required. An array of upstream node references", "{'key': 'mode', 'type': 'str'}, } def __init__( self, **kwargs ):", "The token blob to be provided to the Azure Video", "to 'true' the RTSP playback URL will not be published,", "other live pipelines in your account. :type bitrate_kbps: int :ivar", "values for the user-defined topology parameters. A pipeline can only", "list of trusted authorities should be used. :type trusted_certificates: ~video_analyzer.models.CertificateSource", "credentials. It is recommended that this value is parameterized as", "type: str self.inputs = kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase): \"\"\"Encoder processor allows", "super(UserAssignedManagedIdentity, self).__init__(**kwargs) self.client_id = None self.principal_id = None class UsernamePasswordCredentials(CredentialsBase):", "): super(VideoCreationProperties, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description = kwargs.get('description',", "generic RTSP server to be ingested into a pipeline. 
All", "'scale': {'key': 'scale', 'type': 'VideoScale'}, } def __init__( self, **kwargs", "of EdgeModuleEntity items. :param value: A collection of EdgeModuleEntity items.", "all video content is retained indefinitely. This property is only", "encoded. If omitted, encoder sets it automatically to try and", "None) self.archival = kwargs.get('archival', None) class VideoEntityCollection(msrest.serialization.Model): \"\"\"A collection of", "'type': 'str'}, 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, } def __init__(", "width: str :param mode: Describes the video scaling mode to", "When set to 'true' causes the certificate subject name validation", "The time range for requests in each blob. :vartype blob_duration:", "{ 'created_by': {'key': 'createdBy', 'type': 'str'}, 'created_by_type': {'key': 'createdByType', 'type':", "a pipeline. All required parameters must be populated in order", "self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkServiceConnectionState(msrest.serialization.Model): \"\"\"A collection of", "{'key': 'kid', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey':", "by server. :type type: str :param ranges: Required. The sequence", "for data to be stored or exported to other destinations.", "VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): \"\"\"Status of private endpoint connection operation. All required parameters", "__init__( self, **kwargs ): super(TokenClaim, self).__init__(**kwargs) self.name = kwargs['name'] self.value", "'[str]'}, 'audiences': {'key': 'audiences', 'type': '[str]'}, 'claims': {'key': 'claims', 'type':", "a sampling rate of 48 kHz). 
Allowed values are 96,", "'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'title': {'key': 'properties.title', 'type':", "= kwargs.get('description', None) self.segment_length = kwargs.get('segment_length', None) self.retention_period = kwargs.get('retention_period',", "secret string in order to prevent this value to be", "'private_endpoint_connections': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", "service will disconnect temporarily from the camera. It will retry", "resource. Variables are only populated by the server, and will", "expected use of the topology to be described here. :type", "\"\"\"Base class for access policies authentication methods. You probably want", "**kwargs ): super(AuthenticationBase, self).__init__(**kwargs) self.type = None # type: Optional[str]", "be populated in order to send to Azure. :param expiration_date:", "description. :type description: str \"\"\" _attribute_map = { 'provider': {'key':", "\"SecretString\", \"Int\", \"Double\", \"Bool\". :type type: str or ~video_analyzer.models.ParameterType :param", "failed pipeline job. :param code: The error code. :type code:", "True}, 'kind': {'required': True}, 'sku': {'required': True}, } _attribute_map =", "'unit': {'key': 'unit', 'type': 'str'}, 'aggregation_type': {'key': 'aggregationType', 'type': 'str'},", "and archives the content can be reused across many different", "to Azure. :param user_assigned_identity: Required. The user assigned managed identity's", "\"\"\"The details of the user assigned managed identity used by", "class directly. Known sub-classes are: EccTokenKey, RsaTokenKey. All required parameters", "to be ingested by the pipeline. :type sources: list[~video_analyzer.models.SourceNodeBase] :param", "'type': 'str'}, } def __init__( self, **kwargs ): super(StorageAccount, self).__init__(**kwargs)", "approximately double of the chosen video segment length. 
It is", "**kwargs ): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id',", "volume of storage transactions. Larger segments reduce the amount of", "steps to be applied when processing content for a particular", "then the service will disconnect temporarily from the camera. It", "self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str class UserAssignedManagedIdentity(msrest.serialization.Model): \"\"\"The details", "{ 'id': {'readonly': True}, } _attribute_map = { 'id': {'key':", "str \"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[AccessPolicyEntity]'},", "use the sub-classes and not this class directly. Known sub-classes", "'type': '[str]'}, 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'}, } def __init__(", ":ivar required_members: The private link resource required member names. :vartype", "= '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str class UserAssignedManagedIdentity(msrest.serialization.Model): \"\"\"The details of", "ParameterDeclaration(msrest.serialization.Model): \"\"\"Single topology parameter declaration. Declared parameters can and must", ":ivar name: The metric dimension name. :vartype name: str :ivar", "Endpoint Connection resource. Variables are only populated by the server,", "bool \"\"\" _validation = { 'name': {'readonly': True}, 'display_name': {'readonly':", "the credentials. It is recommended that this value is parameterized", "{'key': 'unit', 'type': 'str'}, 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, 'lock_aggregation_type':", ":param e: Required. RSA public key exponent. :type e: str", "type: The additional info type. 
:vartype type: str :ivar info:", "{'key': 'password', 'type': 'str'}, } def __init__( self, **kwargs ):", "{'readonly': True}, 'unit': {'readonly': True}, 'aggregation_type': {'readonly': True}, 'lock_aggregation_type': {'readonly':", "be initialized and authorized to the cloud account. The provisioning", "self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str self.transport = kwargs.get('transport', None)", "self).__init__(**kwargs) self.log_specifications = None self.metric_specifications = None class SinkNodeBase(NodeBase): \"\"\"Base", ":ivar source_mdm_namespace: The source MDM namespace. :vartype source_mdm_namespace: str :ivar", "request. :ivar name: The metric dimension name. :vartype name: str", "module through the Azure IoT Edge module twin properties. :vartype", "Possible values include: \"Average\", \"Count\", \"Total\". :vartype lock_aggregation_type: str or", "= kwargs.get('message', None) class CredentialsBase(msrest.serialization.Model): \"\"\"Base class for credential objects.", "Sink nodes allow pipeline data to be stored or exported.", "info. :vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] \"\"\" _validation = { 'code': {'readonly':", "**kwargs ): super(SourceNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str", "encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs", "large: str \"\"\" _attribute_map = { 'small': {'key': 'small', 'type':", "to see if the camera bitrate is now below the", "): super(LogSpecification, self).__init__(**kwargs) self.name = None self.display_name = None self.blob_duration", "send to Azure. :param key_identifier: Required. The URL of the", "kwargs.get('id', None) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None)", "video and audio content. 
:type media_info: ~video_analyzer.models.VideoMediaInfo :param archival: Video", "authenticating a TLS connection. A null list designates that Azure", "on how the input content should be processed. :type preset:", "self).__init__(**kwargs) self.type = kwargs['type'] self.key_vault_properties = kwargs.get('key_vault_properties', None) self.identity =", "} _attribute_map = { 'node_name': {'key': 'nodeName', 'type': 'str'}, }", "video. :type bitrate_kbps: str :param frame_rate: The frame rate (in", "'type': 'str'}, 'sku': {'key': 'sku', 'type': 'Sku'}, 'description': {'key': 'properties.description',", "expiration_date: The expiration date of the registration token. The Azure", "metric_specifications: List of metric specifications. :vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] \"\"\" _validation", "super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class VideoAnalyzerIdentity(msrest.serialization.Model): \"\"\"The managed", "date in ISO8601 format (eg. 2021-01-01T00:00:00Z). :vartype expiration_date: ~datetime.datetime :ivar", "): super(CredentialsBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class", "str :param operation: The operation type. :type operation: str :param", "} def __init__( self, **kwargs ): super(PipelineJobUpdate, self).__init__(**kwargs) self.topology_name =", "device_id: Required. The IoT device id to use when establishing", "to 1280x720. All required parameters must be populated in order", "Y coordinate. :type y: str \"\"\" _validation = { 'type':", "= None class VideoAnalyzerCollection(msrest.serialization.Model): \"\"\"A collection of VideoAnalyzer items. 
:param", "None self.aggregation_type = None self.lock_aggregation_type = None self.supported_aggregation_types = kwargs.get('supported_aggregation_types',", "{ 'user_assigned_identity': {'required': True}, } _attribute_map = { 'user_assigned_identity': {'key':", "'sku', 'type': 'Sku'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'parameters': {'key':", "of private endpoint connections. :type value: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _attribute_map =", "derived types.Constant filled by server. :type type: str :param ranges:", "'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__(", "to be ingested into a pipeline. All required parameters must", "streaming base URL. The archived content can be automatically played", "self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None) self.preview_image_urls = kwargs.get('preview_image_urls', None) class VideoCreationProperties(msrest.serialization.Model):", "str :param rtsp_tunnel_url: Video low-latency streaming URL. The live content", "None # type: Optional[str] class EncoderCustomPreset(EncoderPresetBase): \"\"\"Describes a custom preset", "be created on the service. These will not take effect", "'type': 'SystemData'}, 'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'}, } def __init__(", "video type is 'archive' and preview images are enabled. :type", "'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} } def __init__( self, **kwargs ): super(TokenKey,", "to false. :type has_data: bool :param is_in_use: Required. Value indicating", "Azure. :param can_stream: Required. Value indicating whether or not the", "type is 'archive' and preview images are enabled. :param small:", "'type': {'required': True}, 'credentials': {'required': True}, 'url': {'required': True}, }", "Possible values include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\". :type created_by_type: str", "check availability request body. 
:param name: The name of the", "'disableRtspPublishing', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoPublishingOptions,", "'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key':", "~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the Video Analyzer account.", "be streamed. :type can_stream: bool :param has_data: Required. Value indicating", "SKU details. Variables are only populated by the server, and", "will ensure that one 'noisy neighbor' does not affect other", "'@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } _subtype_map", "dimension name. :vartype name: str :ivar display_name: The display name", "{'readonly': True}, 'expiration': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map", "__init__( self, **kwargs ): super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' #", "lives. :type location: str \"\"\" _validation = { 'id': {'readonly':", "self.type = None self.system_data = None class ProxyResource(Resource): \"\"\"The resource", "self.alg = kwargs['alg'] self.x = kwargs['x'] self.y = kwargs['y'] class", "VideoSource(SourceNodeBase): \"\"\"Video source allows for content from a Video Analyzer", "OData error response format.). :param error: The error object. :type", "{'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} } def __init__( self, **kwargs ):", "resource Private link DNS zone name. :type required_zone_names: list[str] \"\"\"", "} def __init__( self, **kwargs ): super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date =", "**kwargs ): super(MetricDimension, self).__init__(**kwargs) self.name = None self.display_name = None", "~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public network access for ingestion group. 
:type", "{ 'value': {'key': 'value', 'type': '[PipelineJob]'}, 'next_link': {'key': '@nextLink', 'type':", "image URL. :type small: str :param medium: Medium resolution preview", "True}, 'name': {'required': True}, 'video_name': {'required': True}, 'time_sequences': {'required': True},", "'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineTopologyCollection, self).__init__(**kwargs)", "type: str self.trusted_certificates = kwargs.get('trusted_certificates', None) self.validation_options = kwargs.get('validation_options', None)", "to encrypt the Account Key. Possible values include: \"SystemKey\", \"CustomerKey\".", "'[VideoEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self,", "in the pipelines. All required parameters must be populated in", "value: A collection of Operation items. :type value: list[~video_analyzer.models.Operation] \"\"\"", "time range for requests in each blob. :vartype blob_duration: str", "The endpoint URL for Video Analyzer to connect to. :type", "'segmentLength', 'type': 'str'}, 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def", "kwargs.get('sinks', None) class PrivateEndpoint(msrest.serialization.Model): \"\"\"The Private Endpoint resource. Variables are", "keys allow for seamless key rotation of the token signing", "'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'tags': {'key': 'tags', 'type':", "def __init__( self, **kwargs ): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname',", "to be applied across all the cameras. 
Individual instance properties", "'str'}, 'transport': {'key': 'transport', 'type': 'str'}, 'endpoint': {'key': 'endpoint', 'type':", "'details': {'key': 'details', 'type': '[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},", "~video_analyzer.models.SystemData :param topology_name: The reference to an existing pipeline topology", "\"Deactivating\". :vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters: List of", "server. :type type: str :param username: Required. Username to be", "'str'}, 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def __init__( self,", "should be used. :type trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options: Validation options", "preview_image_urls: Video preview image URLs. These URLs can be used", "str :ivar source_mdm_namespace: The source MDM namespace. :vartype source_mdm_namespace: str", "class PrivateLinkResource(Resource): \"\"\"A private link resource. Variables are only populated", "in the sequence. All required parameters must be populated in", "\"\"\" _validation = { 'type': {'required': True}, 'iot_hub_name': {'required': True},", "): super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkServiceConnectionState(msrest.serialization.Model): \"\"\"A", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey':", "The metric name. :vartype name: str :ivar display_name: The metric", "to 3000 Kbps in increments of 100 Kbps. If the", "'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'}, 'error': {'key': 'properties.error', 'type': 'PipelineJobError'},", "createdBy and modifiedBy information. 
:vartype system_data: ~video_analyzer.models.SystemData :param role: Defines", "kwargs.get('operation', None) self.description = kwargs.get('description', None) class ParameterDeclaration(msrest.serialization.Model): \"\"\"Single topology", "\"\"\"The resource model definition for a Azure Resource Manager proxy", "to connect to RTSP cameras and/or generic RTSP servers. :type", "and will be ignored when sending a request. :ivar expiration_date:", "{ 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} } def", "self.next_link = kwargs.get('next_link', None) class PipelineTopologyUpdate(ProxyResource): \"\"\"Pipeline topology describes the", "= kwargs.get('bitrate_kbps', None) self.frame_rate = kwargs.get('frame_rate', None) self.scale = kwargs.get('scale',", "issuers: List of expected token issuers. Token issuer is valid", ":vartype type: str :ivar info: The additional info. :vartype info:", "list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of the topology sink nodes. Sink", "case-sensitive. :type device_id: str \"\"\" _validation = { 'type': {'required':", "__init__( self, **kwargs ): super(IotHub, self).__init__(**kwargs) self.id = kwargs['id'] self.identity", "'display_name': {'readonly': True}, 'to_be_exported_for_shoebox': {'readonly': True}, } _attribute_map = {", "for real-time content processing. When activated, this live pipeline will", "publishes content via the video resource. 
This property is only", "{'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'}, 'encryption':", "{ 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} } def __init__( self, **kwargs ):", "self, **kwargs ): super(LogSpecification, self).__init__(**kwargs) self.name = None self.display_name =", "list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with this resource. :vartype", "str :ivar display_name: The metric display name. :vartype display_name: str", "metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param", "'name', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key':", "represents a unique instance of a batch topology, used for", "'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'}, } def __init__( self, **kwargs", "of the parameter. :type name: str :param type: Required. Type", "def __init__( self, **kwargs ): super(NodeBase, self).__init__(**kwargs) self.type = None", "{'readonly': True}, 'error': {'readonly': True}, } _attribute_map = { 'id':", "encoding audio with the AAC codec. All required parameters must", "'details', 'type': '[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def", "and/or generic RTSP servers. :type endpoint: ~video_analyzer.models.EndpointBase \"\"\" _validation =", "can be up to 2048 characters long. 
:type description: str", "are exchanged through long lived HTTP connections, and the RTP", "'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } def __init__(", "'TunnelBase'}, 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options': {'key': 'validationOptions', 'type':", "__init__( self, **kwargs ): super(JwtAuthentication, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' #", "'value', 'type': '[VideoAnalyzer]'}, } def __init__( self, **kwargs ): super(VideoAnalyzerCollection,", "= kwargs['key_identifier'] self.current_key_identifier = None class ListProvisioningTokenInput(msrest.serialization.Model): \"\"\"The input parameters", "'str'}, 'operation': {'key': 'operation', 'type': 'str'}, 'description': {'key': 'description', 'type':", "kilobits per second or Kbps, at which audio should be", "'type': 'iso-8601'}, 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, 'last_modified_by_type': {'key': 'lastModifiedByType',", "results in a video of type 'archive'. If used in", "'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'tunnel': {'key':", "to RTSP cameras and/or generic RTSP servers. :type endpoint: ~video_analyzer.models.EndpointBase", ":param has_data: Required. 
Value indicating whether or not there has", "{ 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type':", "'VideoMediaInfo'}, 'archival': {'key': 'properties.archival', 'type': 'VideoArchival'}, } def __init__( self,", "'video_name': {'required': True}, 'time_sequences': {'required': True}, } _attribute_map = {", "ResourceIdentity(msrest.serialization.Model): \"\"\"The user assigned managed identity to use when accessing", "self.name = kwargs['name'] self.tier = None class StorageAccount(msrest.serialization.Model): \"\"\"The details", "'alg': {'key': 'alg', 'type': 'str'}, 'x': {'key': 'x', 'type': 'str'},", ":ivar current_key_identifier: The current key used to encrypt Video Analyzer", "status: Operation status. :type status: str :param error: The error", "'[LogSpecification]'}, 'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, } def __init__( self,", "{'key': 'sku', 'type': 'Sku'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'parameters':", "sub-classes are: JwtAuthentication. All required parameters must be populated in", "2021-01-01T00:00:00Z). :vartype expiration_date: ~datetime.datetime :ivar token: The content token value", "false. :type has_data: bool :param is_in_use: Required. Value indicating whether", "length indicates the length of individual content files (segments) which", "{'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, 'video_name': {'required':", "The error code. :vartype code: str :ivar message: The error", ":type location: str \"\"\" _validation = { 'id': {'readonly': True},", "} def __init__( self, **kwargs ): super(VideoEntity, self).__init__(**kwargs) self.title =", "to Azure. :param name: Required. Name of the claim which", "'true', then \"disableRtspPublishing\" must be set to 'false'. 
:type disable_archive:", "'bitrateKbps', 'type': 'str'}, 'frame_rate': {'key': 'frameRate', 'type': 'str'}, 'scale': {'key':", "file, and published via a video resource of type 'file'.", "live topology, used for real-time ingestion, archiving and publishing of", "Moreover, an ongoing video recording can be played in \"live", "self.value = kwargs.get('value', None) class PrivateLinkResource(Resource): \"\"\"A private link resource.", "self, **kwargs ): super(OperationCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class", "indicating whether or not the video can be streamed. Only", "{'key': 'height', 'type': 'str'}, 'width': {'key': 'width', 'type': 'str'}, 'mode':", "None class ProxyResource(Resource): \"\"\"The resource model definition for a Azure", "type 'file'. All required parameters must be populated in order", "\"\"\" _validation = { 'name': {'readonly': True}, 'status': {'readonly': True},", "{'key': 'properties.groupId', 'type': 'str'}, 'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'}, 'required_zone_names':", "True}, 'device_id': {'required': True}, } _attribute_map = { 'type': {'key':", "list of private link resources. :param value: Array of private", "target. :vartype target: str :ivar details: The error details. :vartype", "'str'}, 'description': {'key': 'description', 'type': 'str'}, 'actions_required': {'key': 'actionsRequired', 'type':", "of the connection between service consumer and provider. :param status:", "'str'}, } def __init__( self, **kwargs ): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type", "sub-classes and not this class directly. Known sub-classes are: EccTokenKey,", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under", "name. :vartype name: str :ivar display_name: The metric display name.", "self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.identity = kwargs.get('identity', None) self.storage_accounts", "storage. 
It must be provided in the ISO8601 duration format", ":type inputs: list[~video_analyzer.models.NodeInput] :param preset: Required. The encoder preset, which", "server. :type type: str :param name: Required. Node name. Must", ":ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible", "Describes the tunnel through which Video Analyzer can connect to", "self.endpoints = None self.encryption = kwargs.get('encryption', None) self.iot_hubs = kwargs.get('iot_hubs',", "class EndpointBase(msrest.serialization.Model): \"\"\"Base class for endpoints. You probably want to", "**kwargs ): super(Operation, self).__init__(**kwargs) self.name = kwargs['name'] self.display = kwargs.get('display',", "kwargs.get('description', None) self.parameters = kwargs.get('parameters', None) self.sources = kwargs.get('sources', None)", "~video_analyzer.models.PrivateEndpointConnectionProvisioningState \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':", "__init__( self, **kwargs ): super(VideoAnalyzerUpdate, self).__init__(**kwargs) self.tags = kwargs.get('tags', None)", "RTSP messages are exchanged through long lived HTTP connections, and", "self, **kwargs ): super(IotHub, self).__init__(**kwargs) self.id = kwargs['id'] self.identity =", "'transport', 'type': 'str'}, 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, } def", "'type': 'KeyVaultProperties'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status',", "video content authorization token to download the most recent still", "a string. The datetime values should follow IS08601, and the", "type is 'archive' and preview images are enabled. :type preview_image_urls:", "Required. 
Value indicating whether or not there has ever been", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} } def __init__( self,", "{'key': 'value', 'type': '[EdgeModuleEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, }", "self.claims = kwargs.get('claims', None) self.keys = kwargs.get('keys', None) class KeyVaultProperties(msrest.serialization.Model):", "to 'true' content will not be archived or recorded. This", "format.). :param error: The error object. :type error: ~video_analyzer.models.ErrorDetail \"\"\"", "TLS connection. By default, strict validation is used. :type validation_options:", "pipeline instances which share the same processing characteristics. For instance,", "resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with", ":param created_by_type: The type of identity that created the resource.", "'type': 'str'}, } def __init__( self, **kwargs ): super(VideoPublishingOptions, self).__init__(**kwargs)", "throughout the topology nodes through the use of \"${PARAMETER_NAME}\" string", "'action_type': {'key': 'actionType', 'type': 'str'}, } def __init__( self, **kwargs", "messages are exchanged through long lived HTTP connections, and the", "~video_analyzer.models.ResourceIdentity :ivar status: The current status of the Key Vault", "or recorded. This is used, for example, when the topology", "types.Constant filled by server. :type type: str \"\"\" _validation =", "} def __init__( self, **kwargs ): super(SinkNodeBase, self).__init__(**kwargs) self.type =", "may be gated on events or camera may not be", ":type bitrate_kbps: str \"\"\" _validation = { 'type': {'required': True},", "types.Constant filled by server. 
:type type: str :param audio_encoder: Describes", "= { 'created_by': {'key': 'createdBy', 'type': 'str'}, 'created_by_type': {'key': 'createdByType',", "class VideoArchival(msrest.serialization.Model): \"\"\"Video archival properties. :param retention_period: Video retention period", "'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints', 'type':", "kid: str \"\"\" _validation = { 'type': {'required': True}, 'kid':", "resource can lead to errors when uploading content to the", "\"\"\" _attribute_map = { 'height': {'key': 'height', 'type': 'str'}, 'width':", "None) self.iot_hubs = kwargs.get('iot_hubs', None) self.public_network_access = kwargs.get('public_network_access', None) self.network_access_control", "\"Archive\", \"File\". :vartype type_properties_type: str or ~video_analyzer.models.VideoType :ivar flags: Video", "Public network access for ingestion group. :type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param", "} _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, }", "class for nodes. You probably want to use the sub-classes", "__init__( self, **kwargs ): super(EncoderCustomPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' #", "5 minutes, in 30 seconds increments. :type segment_length: str \"\"\"", "'@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'}", "None) class TlsValidationOptions(msrest.serialization.Model): \"\"\"Options for controlling the validation of TLS", "'name': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id = None class EdgeModuleEntityCollection(msrest.serialization.Model): \"\"\"A collection of", "str or ~video_analyzer.models.SkuName :ivar tier: The SKU tier. Possible values", "parameter. 
:type name: str :param type: Required. Type of the", "{'key': 'properties.sources', 'type': '[SourceNodeBase]'}, 'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks':", "Possible values include: \"ClientApi\". :type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType \"\"\"", "x: str :param y: Required. Y coordinate. :type y: str", "{'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ):", "True}, 'n': {'required': True}, 'e': {'required': True}, } _attribute_map =", "\"Succeeded\". :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint", "kwargs.get('default', None) class ParameterDefinition(msrest.serialization.Model): \"\"\"Defines the parameter value of an", "of identity that last modified the resource. Possible values include:", "'type': 'str'}, } def __init__( self, **kwargs ): super(IotHub, self).__init__(**kwargs)", "HTTP. When using TCP, the RTP packets are interleaved on", "self.ingestion = kwargs.get('ingestion', None) self.consumption = kwargs.get('consumption', None) class NodeInput(msrest.serialization.Model):", "recommended that this value is parameterized as a secret string", "key used to encrypt the Account Key. Possible values include:", "{'key': 'ignoreHostname', 'type': 'str'}, 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, }", "'iso-8601'}, 'token': {'key': 'token', 'type': 'str'}, } def __init__( self,", "_attribute_map = { 'value': {'key': 'value', 'type': '[VideoAnalyzer]'}, } def", "required information for Video Analyzer to connect to RTSP cameras", "'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'blob_duration': {'key':", "} def __init__( self, **kwargs ): super(VideoMediaInfo, self).__init__(**kwargs) self.segment_length =", "= { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly':", "Resource tags. 
:type tags: dict[str, str] :param location: Required. The", "information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to an", "'str'}, } def __init__( self, **kwargs ): super(VideoPublishingOptions, self).__init__(**kwargs) self.disable_archive", "= kwargs.get('archival', None) class VideoEntityCollection(msrest.serialization.Model): \"\"\"A collection of VideoEntity items.", "to use when authenticating a TLS connection. By default, strict", "str :param created_by_type: The type of identity that created the", "update operation for a Video Analyzer account. Variables are only", "pipeline represents a unique instance of a live topology, used", ":param topology_name: Reference to an existing pipeline topology. When activated,", "in the ISO8601 duration format in the granularity of days,", "'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'edge_module_id': {'key': 'properties.edgeModuleId', 'type':", "segments which are intended to be kept in storage. It", "source_mdm_account: The source MDM account. :vartype source_mdm_account: str :ivar source_mdm_namespace:", "\"kind\" is set to \"live\". :param disable_archive: When set to", "kwargs.get('segment_length', None) self.retention_period = kwargs.get('retention_period', None) class VideoEncoderBase(msrest.serialization.Model): \"\"\"Base type", "and a 'location'. Variables are only populated by the server,", "ID generated for the instance of the Video Analyzer edge", "tunnel through which Video Analyzer can connect to the endpoint", "\"Standard\". :vartype tier: str or ~video_analyzer.models.SkuTier \"\"\" _validation = {", "checked. :type name: str :param type: The resource type. :type", "'Pad' or 'Stretch' then both width and height must be", "sequence. 
All required parameters must be populated in order to", "str \"\"\" _attribute_map = { 'name_available': {'key': 'nameAvailable', 'type': 'bool'},", "self.n = kwargs['n'] self.e = kwargs['e'] class SourceNodeBase(NodeBase): \"\"\"Base class", "pipeline data to be stored or exported. :type sinks: list[~video_analyzer.models.SinkNodeBase]", "zero, and less than or equal to 300. If omitted,", "_attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'identity': {'key':", ":type created_by_type: str or ~video_analyzer.models.CreatedByType :param created_at: The timestamp of", "= kwargs.get('disable_rtsp_publishing', None) class VideoScale(msrest.serialization.Model): \"\"\"The video scaling information. :param", "ingested from cameras. * Processors: list of nodes which perform", ":type type: str :param issuers: List of expected token issuers.", "Azure Resource Manager APIs to return error details for failed", "for specified resources under the Video Analyzer account. Possible values", "'type': '[EdgeModuleEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__(", "are persisted to storage. Smaller segments provide lower archive playback", "parameter. Possible values include: \"String\", \"SecretString\", \"Int\", \"Double\", \"Bool\". :type", "TLS transport (data is encrypted in transit). All required parameters", "'value': {'required': True}, } _attribute_map = { 'name': {'key': 'name',", "'type': 'str'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'time_sequences': {'key': 'timeSequences',", "defined according to the scenario to be achieved and can", "sequence of datetime ranges. Example: '[[\"2021-10-05T03:30:00Z\", \"2021-10-05T03:40:00Z\"]]'. 
:type ranges: str", "token: str \"\"\" _validation = { 'expiration_date': {'readonly': True}, 'token':", "# type: Optional[str] class TokenKey(msrest.serialization.Model): \"\"\"Key properties for JWT token", "list of user defined parameters that can be references across", "y: str \"\"\" _validation = { 'type': {'required': True}, 'kid':", "'[str]'}, } def __init__( self, **kwargs ): super(MetricSpecification, self).__init__(**kwargs) self.name", "a live, low-latency feed is available from the source. :type", "mode is 'PreserveAspectRatio' then only one of width or height", "access is allowed for resources under the Video Analyzer account.", "through the use of \"${PARAMETER_NAME}\" string pattern. Parameters can have", "when sending a request. :ivar service_specification: The service specifications. :vartype", "identifier. :type id: str :param identity: Required. The IoT Hub", "system_data: ~video_analyzer.models.SystemData :param private_endpoint: The resource of private end point.", ":param created_by: The identity that created the resource. :type created_by:", "include: \"ClientApi\". :type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType \"\"\" _validation =", "= kwargs.get('trusted_certificates', None) self.validation_options = kwargs.get('validation_options', None) class TlsValidationOptions(msrest.serialization.Model): \"\"\"Options", "is 'false'. If set to 'true', then \"disableArchive\" must be", "Licensed under the MIT License. See License.txt in the project", "pipeline topology which captures content from a RTSP camera and", "\"\"\"Base class for topology processor nodes. You probably want to", "name. :vartype display_name: str :ivar blob_duration: The time range for", "unique within the topology. :type name: str :param inputs: Required.", "RS384 or RS512. Possible values include: \"RS256\", \"RS384\", \"RS512\". 
:type", "'str'}, } def __init__( self, **kwargs ): super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date", "def __init__( self, **kwargs ): super(ProcessorNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase'", "accessing the encryption keys in Key Vault. Variables are only", "} def __init__( self, **kwargs ): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name =", "identity: Required. The IoT Hub identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar", "{'readonly': True}, 'display_name': {'readonly': True}, 'display_description': {'readonly': True}, 'unit': {'readonly':", "\"\"\" _validation = { 'type': {'required': True}, 'name': {'required': True},", "updates on the consumer. :type actions_required: str \"\"\" _attribute_map =", "for archiving content. Default is 'false'. If set to 'true',", "**kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type = None self.info = None", "= None self.blob_duration = None class MetricDimension(msrest.serialization.Model): \"\"\"A metric dimension.", "the resource on API requests. :type password: str \"\"\" _validation", "{'key': 'message', 'type': 'str'}, 'target': {'key': 'target', 'type': 'str'}, 'details':", "super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint = kwargs.get('private_endpoint', None) self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)", "def __init__( self, **kwargs ): super(VideoPublishingOptions, self).__init__(**kwargs) self.disable_archive = kwargs.get('disable_archive',", "that created the resource. Possible values include: \"User\", \"Application\", \"ManagedIdentity\",", "self.status = None self.error = None class LivePipelineUpdate(ProxyResource): \"\"\"Live pipeline", "to send to Azure. :param expiration_date: Required. 
The desired expiration", "'iot_hub_name': {'key': 'iotHubName', 'type': 'str'}, 'device_id': {'key': 'deviceId', 'type': 'str'},", "= { 'code': {'readonly': True}, 'message': {'readonly': True}, 'target': {'readonly':", "~video_analyzer.models.ErrorDetail \"\"\" _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetail'},", "'name', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'start_time': {'key':", "too many results to return in one response). :type next_link:", "def __init__( self, **kwargs ): super(ParameterDeclaration, self).__init__(**kwargs) self.name = kwargs['name']", "must be populated in order to send to Azure. :param", "list[~video_analyzer.models.PipelineJob] :param next_link: A link to the next page of", "JWT token header. :type kid: str \"\"\" _validation = {", "processed. You probably want to use the sub-classes and not", "by server. :type type: str :param credentials: Required. Credentials to", "error object. :type error: ~video_analyzer.models.ErrorDetail \"\"\" _attribute_map = { 'error':", "{'key': 'value', 'type': '[Operation]'}, } def __init__( self, **kwargs ):", "'type': 'VideoScale'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'} }", "= { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map", "'type': 'AuthenticationBase'}, } def __init__( self, **kwargs ): super(AccessPolicyEntity, self).__init__(**kwargs)", "'str'}, } def __init__( self, **kwargs ): super(PipelineJobCollection, self).__init__(**kwargs) self.value", "job will process content according to the pipeline topology definition.", "'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'},", "= kwargs['name'] class Endpoint(msrest.serialization.Model): \"\"\"The endpoint details. All required parameters", "order to send to Azure. :param can_stream: Required. Value indicating", "be applied across all the cameras. 
Individual instance properties can", "'AudioEncoderAac'} } def __init__( self, **kwargs ): super(AudioEncoderBase, self).__init__(**kwargs) self.type", "tags. Resource tags. :type tags: dict[str, str] :param identity: The", ":vartype type_properties_type: str or ~video_analyzer.models.VideoType :ivar flags: Video flags contain", "interleaved on the TCP RTSP connection. When using HTTP, the", "be archived or recorded. This is used, for example, when", "name is available. :type message: str \"\"\" _attribute_map = {", "None) self.description = kwargs.get('description', None) self.actions_required = kwargs.get('actions_required', None) class", "'[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipeline, self).__init__(**kwargs) self.topology_name", ":param is_data_action: Whether the operation applies to data-plane. :type is_data_action:", "Optional video properties to be used in case a new", "server, and will be ignored when sending a request. :param", "node. :type inputs: list[~video_analyzer.models.NodeInput] \"\"\" _validation = { 'type': {'required':", "include: \"Average\", \"Count\", \"Total\". :vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType :ivar", "def __init__( self, **kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials'", "{'required': True}, 'preset': {'required': True}, } _attribute_map = { 'type':", "'can_stream': {'required': True}, 'has_data': {'required': True}, 'is_in_use': {'required': True}, }", "for expected token claims. All required parameters must be populated", ":param iot_hubs: The IoT Hubs for this resource. :type iot_hubs:", "\"live\". :type segment_length: str :param retention_period: Video retention period indicates", "be applied on this specific pipeline. :type value: str \"\"\"", "directly. Known sub-classes are: VideoEncoderH264. 
All required parameters must be", "'bool'}, 'action_type': {'key': 'actionType', 'type': 'str'}, } def __init__( self,", "cameras. Individual instance properties can be defined through the use", "kwargs.get('ignore_hostname', None) self.ignore_signature = kwargs.get('ignore_signature', None) class TokenClaim(msrest.serialization.Model): \"\"\"Properties for", "When absent (null), all video content is retained indefinitely. This", "help define the authentication rules, and control access to specific", "Individual instance properties can be defined through the use of", "Analyzer account, including the key version. :vartype current_key_identifier: str \"\"\"", ":vartype display_description: str :ivar unit: The metric unit. Possible values", "length indicates the length of individual video files (segments) which", "'alg', 'type': 'str'}, 'n': {'key': 'n', 'type': 'str'}, 'e': {'key':", "\"Enabled\", \"Disabled\". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess \"\"\" _attribute_map =", "\"Creating\", \"Deleting\", \"Failed\". :vartype provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState \"\"\" _validation", "id: Required. The ID of the storage account resource. Video", "{ 'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'} } def __init__( self, **kwargs ):", ":param value: Parameter value to be applied on this specific", "super(ErrorDetail, self).__init__(**kwargs) self.code = None self.message = None self.target =", "information about the video and audio content. :type media_info: ~video_analyzer.models.VideoMediaInfo", "Must be unique within the topology. :type name: str \"\"\"", "specific to a single video. 
:vartype token: str \"\"\" _validation", "str \"\"\" _validation = { 'type': {'required': True}, 'kid': {'required':", "system_data: ~video_analyzer.models.SystemData :ivar group_id: The private link resource group id.", "class VideoPreviewImageUrls(msrest.serialization.Model): \"\"\"Video preview image URLs. These URLs can be", "= { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource',", "the resource. :type created_by: str :param created_by_type: The type of", ":param video_encoder: Describes a custom preset for encoding video. :type", "username: str :param password: Required. Password to be presented as", "the given values. :type issuers: list[str] :param audiences: List of", "captures content from a RTSP camera and archives the content", "'preset': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "None) self.public_network_access = kwargs.get('public_network_access', None) self.network_access_control = kwargs.get('network_access_control', None) self.provisioning_state", "mode\" with latencies which are approximately double of the chosen", "= None self.system_data = None class ProxyResource(Resource): \"\"\"The resource model", ":param archival: Video archival properties. :type archival: ~video_analyzer.models.VideoArchival \"\"\" _validation", "Required. The discriminator for derived types.Constant filled by server. :type", "TlsValidationOptions(msrest.serialization.Model): \"\"\"Options for controlling the validation of TLS endpoints. :param", "'type': 'TlsValidationOptions'}, } def __init__( self, **kwargs ): super(TlsEndpoint, self).__init__(**kwargs)", "then \"disableArchive\" must be set to 'false'. :type disable_rtsp_publishing: str", "identity to use when accessing a resource. All required parameters", "presented to the endpoint. 
:type credentials: ~video_analyzer.models.CredentialsBase :param url: Required.", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key':", "'type': 'str'}, 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'}, 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes',", ":param kind: Topology kind. Possible values include: \"Live\", \"Batch\". :type", ":vartype display_name: str :ivar display_description: The metric display description. :vartype", "self).__init__(**kwargs) self.service_specification = None class ResourceIdentity(msrest.serialization.Model): \"\"\"The user assigned managed", "tags: A set of tags. Resource tags. :type tags: dict[str,", "__init__( self, **kwargs ): super(RsaTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' #", "server. :type type: str :param kid: Required. JWT token key", "properties. A provisioning token allows for a single instance of", "**kwargs ): super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint = kwargs.get('private_endpoint', None) self.private_link_service_connection_state =", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} }", "algorithm to be used: ES256, ES384 or ES512. Possible values", ":param type: Required. Type of the parameter. Possible values include:", "True}, 'message': {'readonly': True}, 'target': {'readonly': True}, 'details': {'readonly': True},", "archived content can be automatically played by the Azure Video", "to 2048 characters long. 
:type description: str :param segment_length: Segment", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates = kwargs['certificates']", "{'key': 'kid', 'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'x':", "True}, 'ranges': {'required': True}, } _attribute_map = { 'type': {'key':", "str :param video_creation_properties: Optional video properties to be used in", "required_zone_names: list[str] \"\"\" _validation = { 'id': {'readonly': True}, 'name':", "content to the archive. Default value is 30 seconds. This", ":param preset: Required. The encoder preset, which defines the recipe", "EdgeModuleEntityCollection(msrest.serialization.Model): \"\"\"A collection of EdgeModuleEntity items. :param value: A collection", "str :ivar status: The status of the pipeline job operation.", "IoT Hub resource identifier. :type id: str :param identity: Required.", "in a live topology allows for video and audio to", "directly. Known sub-classes are: PemCertificateList. All required parameters must be", "\"\"\"The IoT Hub details. Variables are only populated by the", "def __init__( self, **kwargs ): super(LogSpecification, self).__init__(**kwargs) self.name = None", "self).__init__(**kwargs) self.name = kwargs['name'] self.type = kwargs['type'] self.description = kwargs.get('description',", "'type': {'required': True}, 'kid': {'required': True}, 'alg': {'required': True}, 'x':", "not this class directly. Known sub-classes are: PemCertificateList. All required", "directly. Known sub-classes are: RtspSource, VideoSource. All required parameters must", "Video preview image URLs. These URLs can be used in", "~video_analyzer.models.VideoFlags :ivar content_urls: Set of URLs to the video content.", "client ID. :vartype client_id: str :ivar principal_id: The principal ID.", "Operation status. :type status: str :param error: The error detail.", "from the video archive in different resolutions. 
They are available", "**kwargs ): super(VideoCreationProperties, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description =", "the current video state. All required parameters must be populated", "(optionally) encrypted. Variables are only populated by the server, and", "self.service_specification = None class ResourceIdentity(msrest.serialization.Model): \"\"\"The user assigned managed identity", "used to validate access tokens. Having multiple keys allow for", "pipeline. All required parameters must be populated in order to", "'system_data': {'readonly': True}, 'location': {'required': True}, 'endpoints': {'readonly': True}, 'provisioning_state':", "class ListProvisioningTokenInput(msrest.serialization.Model): \"\"\"The input parameters to generate registration token for", "'code': {'readonly': True}, 'message': {'readonly': True}, 'target': {'readonly': True}, 'details':", "self.private_endpoint_connections = None class VideoAnalyzerCollection(msrest.serialization.Model): \"\"\"A collection of VideoAnalyzer items.", "'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type':", "str :param segment_length: Segment length indicates the length of individual", "self, **kwargs ): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id =", "'str'}, 'device_id': {'key': 'deviceId', 'type': 'str'}, } def __init__( self,", "None) self.description = kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state", "def __init__( self, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = kwargs.get('error',", "True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'edge_module_id': {'readonly': True},", "connected to the Internet prior to the token expiration date.", "set to true, then no content is archived. :type video_name:", "resource ID. 
:type id: str :param start_time: Operation start time.", "add up to 24 hours or less. Currently, there can", "List of trusted certificate authorities when authenticating a TLS connection.", "or ~video_analyzer.models.AccessPolicyRsaAlgo :param n: Required. RSA public key modulus. :type", "'name': {'key': 'name', 'type': 'str'}, } _subtype_map = { 'type':", "second or Kbps, at which audio should be encoded (2-channel", "recorded media within these ranges. :type time_sequences: ~video_analyzer.models.TimeSequenceBase \"\"\" _validation", "None) self.preview_image_urls = kwargs.get('preview_image_urls', None) class VideoCreationProperties(msrest.serialization.Model): \"\"\"Optional properties to", "be valid. :type claims: list[~video_analyzer.models.TokenClaim] :param keys: List of keys", "'kid': {'required': True}, 'alg': {'required': True}, 'n': {'required': True}, 'e':", "'type': 'str'}, 'created_by_type': {'key': 'createdByType', 'type': 'str'}, 'created_at': {'key': 'createdAt',", "type: str :param ranges: Required. The sequence of datetime ranges.", ":param archive_base_url: Video archive streaming base URL. The archived content", "transactions while increasing the archive playback latency. Value must be", "for a single instance of Azure Video analyzer IoT edge", "or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for Video Analyzer.", "RTSP connection. When using HTTP, the RTSP messages are exchanged", "in case the module state lost or reset. Variables are", "{'required': True}, 'alg': {'required': True}, 'n': {'required': True}, 'e': {'required':", "display_name: The diagnostic log category display name. :vartype display_name: str", "list[~video_analyzer.models.ParameterDeclaration] :param sources: List of the topology source nodes. 
Source", "**kwargs ): super(ParameterDefinition, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs.get('value',", "None) class PipelineTopology(ProxyResource): \"\"\"Pipeline topology describes the processing steps to", "'source_mdm_namespace': {'readonly': True}, 'supported_time_grain_types': {'readonly': True}, } _attribute_map = {", "published, disabling low latency streaming. This is used, for example,", ":vartype name: str :ivar type: The type of the resource.", "instance of Azure Video analyzer IoT edge module to be", "~video_analyzer.models.SystemData :param topology_name: Reference to an existing pipeline topology. When", "\"\"\" _attribute_map = { 'download_url': {'key': 'downloadUrl', 'type': 'str'}, 'archive_base_url':", "contains too many results to return in one response). :type", "or less. Currently, there can be only one range specified", "str self.inputs = kwargs['inputs'] class Sku(msrest.serialization.Model): \"\"\"The SKU details. Variables", "If set to 'true', then \"disableArchive\" must be set to", "The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The", "kwargs.get('value', None) class OperationDisplay(msrest.serialization.Model): \"\"\"Operation details. :param provider: The service", "{'key': 'ignoreSignature', 'type': 'str'}, } def __init__( self, **kwargs ):", "{'readonly': True}, 'enable_regional_mdm_account': {'readonly': True}, 'source_mdm_account': {'readonly': True}, 'source_mdm_namespace': {'readonly':", "available video actions and its dynamic properties based on the", "provided. Possible values include: \"Pad\", \"PreserveAspectRatio\", \"Stretch\". :type mode: str", "of the pipeline (read-only). 
Possible values include: \"Inactive\", \"Activating\", \"Active\",", "'name': {'readonly': True}, 'status': {'readonly': True}, 'error': {'readonly': True}, }", "'ignoreHostname', 'type': 'str'}, 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, } def", "self, **kwargs ): super(PrivateLinkResource, self).__init__(**kwargs) self.group_id = None self.required_members =", "def __init__( self, **kwargs ): super(RtspSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource'", "{'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } def __init__( self, **kwargs ):", "self.source_mdm_account = None self.source_mdm_namespace = None self.supported_time_grain_types = None class", "'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, 'action_type': {'key': 'actionType', 'type': 'str'},", "'properties.groupId', 'type': 'str'}, 'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'}, 'required_zone_names': {'key':", "request. :ivar log_specifications: List of log specifications. :vartype log_specifications: list[~video_analyzer.models.LogSpecification]", "self, **kwargs ): super(VideoAnalyzerUpdate, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.identity", "class GroupLevelAccessControl(msrest.serialization.Model): \"\"\"Group level network access control. :param public_network_access: Whether", "'details': {'readonly': True}, 'additional_info': {'readonly': True}, } _attribute_map = {", "server, and will be ignored when sending a request. All", "any time and the new desired retention period will be", "key. 
:type keys: list[~video_analyzer.models.TokenKey] \"\"\" _validation = { 'type': {'required':", "self, **kwargs ): super(Properties, self).__init__(**kwargs) self.service_specification = None class ResourceIdentity(msrest.serialization.Model):", "Video Analyzer to connect to RTSP cameras and/or generic RTSP", "'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} } def", "needs to be created on the service. These will not", "'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},", "You probably want to use the sub-classes and not this", "system_data: ~video_analyzer.models.SystemData \"\"\" _validation = { 'id': {'readonly': True}, 'name':", "'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'group_id':", "None) class VideoEntityCollection(msrest.serialization.Model): \"\"\"A collection of VideoEntity items. :param value:", "is_data_action: bool :param action_type: Indicates the action type. Possible values", "name: The name of the pipeline job operation. :vartype name:", "fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of the instance", "'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'tags': {'key': 'tags',", "class CheckNameAvailabilityRequest(msrest.serialization.Model): \"\"\"The check availability request body. :param name: The", "= kwargs.get('reason', None) self.message = kwargs.get('message', None) class CredentialsBase(msrest.serialization.Model): \"\"\"Base", "List of the instance level parameter values for the user-defined", "Processor nodes enable pipeline data to be analyzed, processed or", "to P30D (30 days), content older than 30 days will", "None) class VideoAnalyzerOperationStatus(msrest.serialization.Model): \"\"\"Status of video analyzer operation. 
All required", "kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.state = None self.expiration", "= { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type',", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param private_endpoint: The resource of", "unit. Possible values include: \"Bytes\", \"Count\", \"Milliseconds\". :vartype unit: str", "\"\"\" _attribute_map = { 'value': {'key': 'value', 'type': '[EdgeModuleEntity]'}, 'next_link':", "identity that created the resource. :type created_by: str :param created_by_type:", "\"\"\" _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource':", "**kwargs ): super(VideoAnalyzerUpdate, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.identity =", "items. :type value: list[~video_analyzer.models.VideoEntity] :param next_link: A link to the", "URL. :type small: str :param medium: Medium resolution preview image", "str :param retention_period: Video retention period indicates how long the", "'status', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'actions_required': {'key':", "None) class PrivateLinkResource(Resource): \"\"\"A private link resource. Variables are only", "= kwargs['endpoint'] class TunnelBase(msrest.serialization.Model): \"\"\"Base class for tunnel objects. You", "'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'tags': {'key': 'tags', 'type': '{str}'},", "if the camera bitrate is now below the reserved capacity.", "consumption. :type download_url: str :param archive_base_url: Video archive streaming base", "{'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, }", "= None class VideoContentUrls(msrest.serialization.Model): \"\"\"Set of URLs to the video", "are: RtspSource, VideoSource. 
All required parameters must be populated in", "= { 'name': {'readonly': True}, 'status': {'readonly': True}, 'error': {'readonly':", "from RTSP cameras through live pipelines or can be created", "is valid if it matches at least one of the", "super(JwtAuthentication, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str self.issuers =", "public key exponent. :type e: str \"\"\" _validation = {", "**kwargs ): super(ParameterDeclaration, self).__init__(**kwargs) self.name = kwargs['name'] self.type = kwargs['type']", "Describes a custom preset for encoding video. :type video_encoder: ~video_analyzer.models.VideoEncoderBase", "\"\"\"Pipeline topology describes the processing steps to be applied when", "{'key': '@type', 'type': 'str'}, 'ranges': {'key': 'ranges', 'type': 'str'}, }", "\"Enabled\", \"Disabled\". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network", "{'key': 'description', 'type': 'str'}, 'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'retention_period':", "status. :type status: str :param error: The error detail. :type", "self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs.get('value', None) class PemCertificateList(CertificateSource):", "id: The ARM identifier for Private Endpoint. :vartype id: str", "= None class EncoderPresetBase(msrest.serialization.Model): \"\"\"Base type for all encoder presets,", "self.type_properties_type = None self.flags = None self.content_urls = None self.media_info", "list[str] :param claims: List of additional token claims to be", "information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Topology kind. 
Possible values", "'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, } def __init__( self, **kwargs", "{'readonly': True}, 'system_data': {'readonly': True}, 'location': {'required': True}, 'endpoints': {'readonly':", "videos can be downloaded as MP4 files. Variables are only", ":type height: str :param width: The desired output video width.", "when sending a request. :ivar expiration_date: The content token expiration", "\"\"\" _attribute_map = { 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, }", "The metric aggregation type. Possible values include: \"Average\", \"Count\", \"Total\".", "{ 'type': {'key': 'type', 'type': 'str'}, 'info': {'key': 'info', 'type':", "video content is retained indefinitely. This property is only allowed", "{'key': 'name', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase':", "'[[\"2021-10-05T03:30:00Z\", \"2021-10-05T03:40:00Z\"]]'. :type ranges: str \"\"\" _validation = { 'type':", "'type': '[TokenKey]'}, } def __init__( self, **kwargs ): super(JwtAuthentication, self).__init__(**kwargs)", "super(OperationCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class OperationDisplay(msrest.serialization.Model): \"\"\"Operation details.", "'endpoint': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "None) self.status = None class SystemData(msrest.serialization.Model): \"\"\"Metadata pertaining to creation", "user assigned managed identity to use when accessing a resource.", "collection of PipelineTopology items. :param value: A collection of PipelineTopology", ":param rtsp_tunnel_url: Video low-latency streaming URL. The live content can", "The metric dimension name. 
:vartype name: str :ivar display_name: The", "'SystemData'}, 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type':", "of the Key Vault key used to encrypt the account.", "collection (when the collection contains too many results to return", "be provided in the ISO8601 duration format in the granularity", "when the topology is used only for archiving content. Default", "defined. Topology parameters with a default value can be optionally", "Curve algorithm. All required parameters must be populated in order", "action_type: str or ~video_analyzer.models.ActionType \"\"\" _validation = { 'name': {'required':", "def __init__( self, **kwargs ): super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity = kwargs['user_assigned_identity']", "self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str self.name = kwargs['name'] class", "interleaved in the HTTP connections alongside the RTSP messages. Possible", "options to use when authenticating a TLS connection. By default,", "uses the average frame rate of the input video. :type", "'error': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name',", "'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'}, } def __init__( self, **kwargs", "which can be used to validate access tokens. Having multiple", "{'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ):", "} _attribute_map = { 'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'}, }", "{'key': 'username', 'type': 'str'}, 'password': {'key': 'password', 'type': 'str'}, }", "archived, and published via a video resource. If archiving is", "None) class GroupLevelAccessControl(msrest.serialization.Model): \"\"\"Group level network access control. 
:param public_network_access:", "so will ensure that one 'noisy neighbor' does not affect", "'createdBy', 'type': 'str'}, 'created_by_type': {'key': 'createdByType', 'type': 'str'}, 'created_at': {'key':", "only define or override parameters values for parameters which have", "class EncoderSystemPreset(EncoderPresetBase): \"\"\"Describes a built-in preset for encoding the input", "input signal to be used on a pipeline node. All", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__(", "= kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class PipelineJobError(msrest.serialization.Model): \"\"\"Details", "\"Tcp\". :type transport: str or ~video_analyzer.models.RtspTransport :param endpoint: Required. RTSP", "indicating if changes on the service provider require any updates", "signing key. Token signature must match exactly one key. :type", "'@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'transport': {'key':", "{'key': 'currentKeyIdentifier', 'type': 'str'}, } def __init__( self, **kwargs ):", ":type type: str or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The properties of", "cameras through live pipelines or can be created by exporting", "{'required': True}, 'video_name': {'required': True}, } _attribute_map = { 'type':", "'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'group_id': {'key': 'properties.groupId', 'type':", ":param mode: Describes the video scaling mode to be applied.", "~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The properties of the key used to", "by server. :type type: str :param username: Required. Username to", "video. 
If omitted, the encoder uses the resolution of the", "None) self.video_publishing_options = kwargs.get('video_publishing_options', None) class VideoSource(SourceNodeBase): \"\"\"Video source allows", "None) self.state = None self.parameters = kwargs.get('parameters', None) class LivePipelineCollection(msrest.serialization.Model):", "'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'}, 'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},", "RS512. Possible values include: \"RS256\", \"RS384\", \"RS512\". :type alg: str", "\"\"\"Represents a video resource within Azure Video Analyzer. Videos can", "# type: str self.preset = kwargs['preset'] class EncoderSystemPreset(EncoderPresetBase): \"\"\"Describes a", "self, **kwargs ): super(RtspSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type:", "items. :type value: list[~video_analyzer.models.PipelineTopology] :param next_link: A link to the", "str :param kid: Required. JWT token key id. Validation keys", "input video. :type bitrate_kbps: str :param frame_rate: The frame rate", ":type origin: str :param properties: Operation properties format. :type properties:", "{'key': 'value', 'type': '[PrivateLinkResource]'}, } def __init__( self, **kwargs ):", "\"\"\" _validation = { 'type': {'required': True}, 'kid': {'required': True},", "a WebSocket tunneled RTSP stream. It is available when the", "and publish content. Note: if downstream of RTSP source, and", "order to send to Azure. :param expiration_date: Required. The desired", "\"\"\"Optional properties to be used in case a new video", "str :param video_name: Required. Name of the Video Analyzer video", "{'required': True}, 'e': {'required': True}, } _attribute_map = { 'type':", "assigned managed identity used by the Video Analyzer resource. 
Variables", "'preset', 'type': 'EncoderPresetBase'}, } def __init__( self, **kwargs ): super(EncoderProcessor,", "= None class ErrorDetail(msrest.serialization.Model): \"\"\"The error detail. Variables are only", "**kwargs ): super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name = None self.status = None", "'[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(PipelineJob, self).__init__(**kwargs) self.topology_name", "Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the", "video actions and its dynamic properties based on the current", "\"SystemKey\", \"CustomerKey\". :type type: str or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The", "the key version. :vartype current_key_identifier: str \"\"\" _validation = {", "information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A set of tags.", "str :param certificates: Required. PEM formatted public certificates. One certificate", "): super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.type = kwargs.get('type',", "): super(TimeSequenceBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class", "transformations. * Sinks: list of one or more data sinks", ":param disable_archive: When set to 'true' content will not be", "resource lives. :type location: str \"\"\" _validation = { 'id':", "'PreserveAspectRatio' then only one of width or height need be", "Example: '[[\"2021-10-05T03:30:00Z\", \"2021-10-05T03:40:00Z\"]]'. :type ranges: str \"\"\" _validation = {", "kwargs.get('archive_base_url', None) self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None) self.preview_image_urls = kwargs.get('preview_image_urls', None)", "{ 'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} } def __init__( self, **kwargs ):", "ranges: Required. The sequence of datetime ranges. 
Example: '[[\"2021-10-05T03:30:00Z\", \"2021-10-05T03:40:00Z\"]]'.", "is retained indefinitely. This property is only allowed for topologies", "# type: str self.video_name = kwargs['video_name'] self.video_creation_properties = kwargs.get('video_creation_properties', None)", "of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\". :vartype type: str", "{'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, }", "kwargs.get('authentication', None) class AccessPolicyEntityCollection(msrest.serialization.Model): \"\"\"A collection of AccessPolicyEntity items. :param", "used only for low latency video streaming. Default is 'false'.", "} _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, 'token':", "'supportedTimeGrainTypes', 'type': '[str]'}, } def __init__( self, **kwargs ): super(MetricSpecification,", "'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'}, 'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},", "will not take effect if the video already exists. :param", "'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self,", "associated storage account. Variables are only populated by the server,", "None) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) self.status", "rotation of the token signing key. Token signature must match", "): super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link',", "ARM identifier for Private Endpoint. 
:vartype id: str \"\"\" _validation", "mode: str or ~video_analyzer.models.VideoScaleMode \"\"\" _attribute_map = { 'height': {'key':", "only allowed for topologies where \"kind\" is set to \"live\".", "as long as the same processing is to be applied", "Storage account (either Microsoft.ClassicStorage or Microsoft.Storage). :type id: str :param", "None) class PipelineTopologyUpdate(ProxyResource): \"\"\"Pipeline topology describes the processing steps to", "= { 'name_available': {'key': 'nameAvailable', 'type': 'bool'}, 'reason': {'key': 'reason',", "all video encoding presets, which define the recipe or instructions", "_attribute_map = { 'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'}, 'metric_specifications': {'key':", "Required. The identity type. :type type: str :param user_assigned_identities: The", "str :param transport: Network transport utilized by the RTSP and", "'type': 'str'}, } def __init__( self, **kwargs ): super(AccessPolicyEntityCollection, self).__init__(**kwargs)", "super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id', None) self.start_time", "which video should be encoded. If omitted, encoder sets it", "the certificate subject name validation to be skipped. Default is", "kwargs.get('large', None) class VideoPublishingOptions(msrest.serialization.Model): \"\"\"Optional flags used to change how", "types.Constant filled by server. :type type: str :param username: Required.", "\"\"\"The SKU details. Variables are only populated by the server,", "to Azure. :param node_name: Required. 
The name of the upstream", "None self.type = None self.system_data = None class ProxyResource(Resource): \"\"\"The", "} def __init__( self, **kwargs ): super(Endpoint, self).__init__(**kwargs) self.endpoint_url =", "True}, 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly': True},", "} def __init__( self, **kwargs ): super(VideoCreationProperties, self).__init__(**kwargs) self.title =", "account. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of", "types are suitable for different applications and scenarios. Possible values", "= kwargs.get('description', None) self.state = None self.expiration = None self.error", "The additional info type. :vartype type: str :ivar info: The", "'type': {'required': True}, 'kid': {'required': True}, 'alg': {'required': True}, 'n':", "self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description = kwargs.get('description', None) self.type_properties_type", "returned in the response for all Azure Resource Manager resources.", "ParameterDefinition(msrest.serialization.Model): \"\"\"Defines the parameter value of an specific pipeline topology", "{'key': 'name', 'type': 'str'}, } def __init__( self, **kwargs ):", "\"\"\"Set of URLs to the video content. :param download_url: Video", "True}, 'enable_regional_mdm_account': {'readonly': True}, 'source_mdm_account': {'readonly': True}, 'source_mdm_namespace': {'readonly': True},", "value: list[~video_analyzer.models.PrivateLinkResource] \"\"\" _attribute_map = { 'value': {'key': 'value', 'type':", "audio content. :type media_info: ~video_analyzer.models.VideoMediaInfo :param archival: Video archival properties.", "\"Batch\". :type kind: str or ~video_analyzer.models.Kind :param sku: Required. Describes", "this value after the initial call to create the video", "information. 
:vartype system_data: ~video_analyzer.models.SystemData :ivar edge_module_id: Internal ID generated for", "super(PipelineJob, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None)", "'type': 'str'}, 'target': {'key': 'target', 'type': 'str'}, 'details': {'key': 'details',", "kwargs.get('description', None) self.segment_length = kwargs.get('segment_length', None) self.retention_period = kwargs.get('retention_period', None)", "file. The resulting MP4 file can be played on any", "{'key': 'transport', 'type': 'str'}, 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, }", "TlsEndpoint, UnsecuredEndpoint. All required parameters must be populated in order", "in the response for all Azure Resource Manager resources. Variables", "is (optionally) encrypted. Variables are only populated by the server,", "the Internet prior to the token expiration date. :type expiration_date:", "10 years. For example, if this is set to P30D", "of the input video. :type frame_rate: str :param scale: Describes", "declared in the pipeline topology. :type name: str :param value:", "'type': 'bool'}, 'is_in_use': {'key': 'isInUse', 'type': 'bool'}, } def __init__(", "The identities associated to the Video Analyzer resource. :type identity:", "node_name: Required. The name of the upstream node in the", "derived types.Constant filled by server. :type type: str :param username:", "server, and will be ignored when sending a request. :ivar", "\"\"\" _validation = { 'expiration_date': {'readonly': True}, 'token': {'readonly': True},", "inputs: list[~video_analyzer.models.NodeInput] :param video_name: Required. Name of a new or", "\"Average\", \"Count\", \"Total\". 
:vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types:", "= { 'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'}, 'metric_specifications': {'key': 'metricSpecifications',", "about the associated storage account. Variables are only populated by", "Analyzer account. :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'endpoints':", "= kwargs.get('value', None) class VideoAnalyzerIdentity(msrest.serialization.Model): \"\"\"The managed identity for the", "compatible players. Exported videos can be downloaded as MP4 files.", "for video and audio to be stored as a file,", "of absolute datetime ranges as a string. The datetime values", "set to 'true' the RTSP playback URL will not be", "error response format.). :param error: The error object. :type error:", "Required. Name of a new or existing video resource used", "'str'}, 'description': {'key': 'description', 'type': 'str'}, 'default': {'key': 'default', 'type':", ":type name: str :param type: Required. Type of the parameter.", "EdgeModuleProvisioningToken(msrest.serialization.Model): \"\"\"Provisioning token properties. A provisioning token allows for a", "to be used in case a new video resource needs", "Default is 'false'. If set to 'true', then \"disableArchive\" must", "'keyVaultProperties', 'type': 'KeyVaultProperties'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key':", "\"\"\"The resource management error additional info. Variables are only populated", "name: The name of the live pipeline operation. :vartype name:", "and modifiedBy information. 
:vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to", "self.current_key_identifier = None class ListProvisioningTokenInput(msrest.serialization.Model): \"\"\"The input parameters to generate", "_attribute_map = { 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'}, } def", "ranges: str \"\"\" _validation = { 'type': {'required': True}, 'ranges':", ":param end_time: Operation end time. :type end_time: str :param status:", ":param expiration_date: Required. The desired expiration date of the registration", "a custom preset for encoding audio. :type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param", "to be initialized and authorized to the cloud account. The", "'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs", "\"Application\", \"ManagedIdentity\", \"Key\". :type created_by_type: str or ~video_analyzer.models.CreatedByType :param created_at:", "for topologies where \"kind\" is set to \"live\". :type retention_period:", "not available. Possible values include: \"Invalid\", \"AlreadyExists\". :type reason: str", "'type': {'required': True}, 'ranges': {'required': True}, } _attribute_map = {", "self.name = kwargs['name'] self.value = kwargs['value'] class TrackedResource(Resource): \"\"\"The resource", "is being received. For example, video recording may be gated", "{'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} } def __init__( self, **kwargs ): super(AuthenticationBase, self).__init__(**kwargs)", "{'key': '@type', 'type': 'str'}, 'iot_hub_name': {'key': 'iotHubName', 'type': 'str'}, 'device_id':", "type: str \"\"\" _attribute_map = { 'name': {'key': 'name', 'type':", ":ivar name: The metric name. :vartype name: str :ivar display_name:", "values are 96, 112, 128, 160, 192, 224, and 256.", "key used to encrypt Video Analyzer account, including the key", "List of the topology parameter declarations. 
Parameters declared here can", "account. :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'endpoints': {'readonly':", "are enabled. :param small: Low resolution preview image URL. :type", "description: str :ivar type_properties_type: Video content type. Different content types", ":param name: Required. The SKU name. Possible values include: \"Live_S1\",", "camera. Variables are only populated by the server, and will", "of Operation items. :param value: A collection of Operation items.", "presented as part of the credentials. :type username: str :param", "rate (in frames per second) of the encoded video. The", "not this class directly. Known sub-classes are: SecureIotDeviceRemoteTunnel. All required", "'type', 'type': 'str'}, 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } def", "are 96, 112, 128, 160, 192, 224, and 256. If", "be used when validating client API access. :type authentication: ~video_analyzer.models.AuthenticationBase", "not affect other live pipelines in your account. :type bitrate_kbps:", ":type description: str :param bitrate_kbps: Maximum bitrate capacity in Kbps", "super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date = kwargs['expiration_date'] class LivePipeline(ProxyResource): \"\"\"Live pipeline represents", "} def __init__( self, **kwargs ): super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind =", "= kwargs.get('next_link', None) class EdgeModuleProvisioningToken(msrest.serialization.Model): \"\"\"Provisioning token properties. A provisioning", "it to be valid. :type claims: list[~video_analyzer.models.TokenClaim] :param keys: List", "include: \"Live\", \"Batch\". 
:type kind: str or ~video_analyzer.models.Kind :param sku:", "None class VideoContentUrls(msrest.serialization.Model): \"\"\"Set of URLs to the video content.", "ignore_hostname: str :param ignore_signature: When set to 'true' causes the", "on the key id present on the JWT token header.", "self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.frame_rate = kwargs.get('frame_rate', None) self.scale =", "the given name is not available. Possible values include: \"Invalid\",", "order to prevent this value to be returned as part", "VideoFlags(msrest.serialization.Model): \"\"\"Video flags contain information about the available video actions", "self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str self.alg = kwargs['alg'] self.n", "'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},", "Azure. :param node_name: Required. The name of the upstream node", "user-defined parameters, which allow for a topology to be parameterized.", "audio to be captured, optionally archived, and published via a", "level granted by this policy. Possible values include: \"Reader\". :type", "(no encryption in transit). All required parameters must be populated", "self, **kwargs ): super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class", "} def __init__( self, **kwargs ): super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name =", "this pipeline job will process content according to the pipeline", "{'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'tags':", "ingested into a pipeline. Currently supported only with batch pipelines.", "A collection of Operation items. :type value: list[~video_analyzer.models.Operation] \"\"\" _attribute_map", "\"\"\" _validation = { 'id': {'required': True}, 'status': {'readonly': True},", "\"Failed\", \"InProgress\", \"Succeeded\". 
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections:", "'str'}, 'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'}, } def __init__( self,", "and preview images are enabled. :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls \"\"\" _attribute_map", "'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map = {", "True}, 'preset': {'required': True}, } _attribute_map = { 'type': {'key':", "EncoderCustomPreset, EncoderSystemPreset. All required parameters must be populated in order", "{'readonly': True}, 'required_members': {'readonly': True}, } _attribute_map = { 'id':", "video resource of type 'file'. All required parameters must be", "'str'}, 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } def __init__( self,", "_subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'} } def", "Indicates whether regional MDM account is enabled. :vartype enable_regional_mdm_account: bool", "private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The provisioning state of the private", "Analyzer's list of trusted authorities should be used. :type trusted_certificates:", "'name': {'key': 'name', 'type': 'str'}, } def __init__( self, **kwargs", "'operation', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, } def", "kwargs.get('reason', None) self.message = kwargs.get('message', None) class CredentialsBase(msrest.serialization.Model): \"\"\"Base class", "action_type: Indicates the action type. Possible values include: \"Internal\". 
:type", ":type topology_name: str :param description: An optional description for the", "= None self.display_name = None self.display_description = None self.unit =", "/manifest(format=mpd-time-cmaf) Moreover, an ongoing video recording can be played in", "access for consumption group. :type consumption: ~video_analyzer.models.GroupLevelAccessControl \"\"\" _attribute_map =", "CredentialsBase(msrest.serialization.Model): \"\"\"Base class for credential objects. You probably want to", "to different values, such as individual cameras' RTSP endpoints and", "values include: \"ES256\", \"ES384\", \"ES512\". :type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo", "'[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} } def", "recipe or instructions on how audio should be processed. You", "= None self.flags = None self.content_urls = None self.media_info =", "of the topology sink nodes. Sink nodes allow pipeline data", "storage account. :param value: Array of private endpoint connections. :type", "kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) self.status = kwargs.get('status', None)", "= { 'value': {'key': 'value', 'type': '[VideoEntity]'}, 'next_link': {'key': '@nextLink',", "'str'}, 'type_properties_type': {'key': 'properties.type', 'type': 'str'}, 'flags': {'key': 'properties.flags', 'type':", "__init__( self, **kwargs ): super(PipelineJobUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None)", "= kwargs.get('parameters', None) class LogSpecification(msrest.serialization.Model): \"\"\"A diagnostic log emitted by", "} def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider =", "a sequence of datetime ranges. The video source only picks", "or reset. Variables are only populated by the server, and", "parameter. :type description: str :param default: The default value for", "items. 
:param value: A collection of EdgeModuleEntity items. :type value:", "__init__( self, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = kwargs.get('error', None)", "request. :ivar name: The metric name. :vartype name: str :ivar", "'str'}, 'message': {'key': 'message', 'type': 'str'}, 'target': {'key': 'target', 'type':", "LivePipelineUpdate(ProxyResource): \"\"\"Live pipeline represents a unique instance of a live", "resource needs to be created on the service. These will", "self.kind = kwargs['kind'] self.sku = kwargs['sku'] self.description = kwargs.get('description', None)", "where \"kind\" is set to \"live\". :param disable_archive: When set", "= kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model): \"\"\"The service metric specifications. Variables are", "self).__init__(**kwargs) self.type = None # type: Optional[str] self.kid = kwargs['kid']", "Account Key. Possible values include: \"SystemKey\", \"CustomerKey\". :type type: str", "\"Double\", \"Bool\". :type type: str or ~video_analyzer.models.ParameterType :param description: Description", "preview image URL. :type small: str :param medium: Medium resolution", "token expiration date. :type expiration_date: ~datetime.datetime \"\"\" _validation = {", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str class VideoEntity(ProxyResource): \"\"\"Represents", "of tags. Resource tags. :type tags: dict[str, str] :param identity:", "should follow IS08601, and the sum of the ranges should", "{'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps':", "'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'}, } def", ":param value: A collection of LivePipeline items. 
:type value: list[~video_analyzer.models.LivePipeline]", "'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs", "parameter value of an specific pipeline topology parameter. See pipeline", "and audio to be captured, optionally archived, and published via", "Analyzer resource. Variables are only populated by the server, and", ":param frame_rate: The frame rate (in frames per second) of", "this specific pipeline. :type value: str \"\"\" _validation = {", "connection has been Approved/Rejected/Removed by the owner of the service.", "True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map = { 'tags': {'key':", "= { 'type': {'readonly': True}, 'info': {'readonly': True}, } _attribute_map", "activated, this live pipeline will process content according to the", "str class RtspSource(SourceNodeBase): \"\"\"RTSP source allows for media from an", "key without a version (for example https://vault/keys/mykey). :type key_identifier: str", "class AccessPolicyEntity(ProxyResource): \"\"\"Access policies help define the authentication rules, and", "'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'}, 'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'},", "used: RS256, RS384 or RS512. Possible values include: \"RS256\", \"RS384\",", "archive in different resolutions. They are available when the video", "header. :type kid: str \"\"\" _validation = { 'type': {'required':", ":param segment_length: Video segment length indicates the length of individual", "UsernamePasswordCredentials. All required parameters must be populated in order to", "AccessPolicyEntity items. 
:type value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A link to", "'createdAt', 'type': 'iso-8601'}, 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, 'last_modified_by_type': {'key':", "} def __init__( self, **kwargs ): super(CredentialsBase, self).__init__(**kwargs) self.type =", "the topology to be described here. :type description: str :param", "definition for an Azure Resource Manager tracked top level resource", ":vartype name: str :ivar status: The status of the pipeline", "key_identifier: Required. The URL of the Key Vault key used", "description: Description of the parameter. :type description: str :param default:", "the video content. :vartype content_urls: ~video_analyzer.models.VideoContentUrls :param media_info: Contains information", ":param value: Array of private endpoint connections. :type value: list[~video_analyzer.models.PrivateEndpointConnection]", "TokenKey(msrest.serialization.Model): \"\"\"Key properties for JWT token validation. You probably want", "list of PEM formatted certificates. All required parameters must be", "Indicates the action type. Possible values include: \"Internal\". :type action_type:", "string in order to prevent this value to be returned", "while increasing the archive playback latency. Value must be specified", "topology, this allows for video and audio to be stored", ":param endpoint: Required. RTSP endpoint information for Video Analyzer to", "type: Required. Type of the parameter. Possible values include: \"String\",", "information. # Code generated by Microsoft (R) AutoRest Code Generator.", "an RTSP source which allows for content to be ingested", "service specifications. 
:vartype service_specification: ~video_analyzer.models.ServiceSpecification \"\"\" _validation = { 'service_specification':", "set to 'true', then \"disableArchive\" must be set to 'false'.", "'str'}, } def __init__( self, **kwargs ): super(EccTokenKey, self).__init__(**kwargs) self.type", ":type error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = { 'name': {'required': True},", "self, **kwargs ): super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier = kwargs['key_identifier'] self.current_key_identifier =", "name: str or ~video_analyzer.models.SkuName :ivar tier: The SKU tier. Possible", "str self.trusted_certificates = kwargs.get('trusted_certificates', None) self.validation_options = kwargs.get('validation_options', None) class", "to periodically connect to the cloud. A new provisioning token", "IoT Hub. :type iot_hub_name: str :param device_id: Required. The IoT", "collection of PipelineTopology items. :type value: list[~video_analyzer.models.PipelineTopology] :param next_link: A", "kwargs.get('private_endpoint', None) self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None) self.provisioning_state = None class", "{'readonly': True}, 'system_data': {'readonly': True}, 'type_properties_type': {'readonly': True}, 'flags': {'readonly':", "of user defined parameters that can be references across the", "None) self.identity = kwargs.get('identity', None) self.status = None class AudioEncoderBase(msrest.serialization.Model):", "True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key':", "'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map", "kwargs.get('title', None) self.description = kwargs.get('description', None) self.segment_length = kwargs.get('segment_length', None)", "different applications and scenarios. Possible values include: \"Archive\", \"File\". :vartype", "nodes which perform data analysis or transformations. 
* Sinks: list", "exchange: TCP or HTTP. When using TCP, the RTP packets", "self.created_by_type = kwargs.get('created_by_type', None) self.created_at = kwargs.get('created_at', None) self.last_modified_by =", "of the pipeline job operation. :vartype name: str :ivar status:", "The ARM identifier for Private Endpoint. :vartype id: str \"\"\"", "connect to. This contains the required information for Video Analyzer", "'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "current video state. :vartype flags: ~video_analyzer.models.VideoFlags :ivar content_urls: Set of", "many pipeline instances which share the same processing characteristics. For", "'status': {'key': 'status', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'},", "= None self.aggregation_type = None self.lock_aggregation_type = None self.supported_aggregation_types =", "{'key': 'archiveBaseUrl', 'type': 'str'}, 'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'}, 'preview_image_urls':", "= kwargs.get('segment_length', None) class VideoPreviewImageUrls(msrest.serialization.Model): \"\"\"Video preview image URLs. These", "video type is 'archive' and preview images are enabled. :param", "~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The provisioning state of the private endpoint", "Description of the parameter. :type description: str :param default: The", "'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name", ":type sinks: list[~video_analyzer.models.SinkNodeBase] \"\"\" _validation = { 'id': {'readonly': True},", "None self.flags = None self.content_urls = None self.media_info = kwargs.get('media_info',", "given values. 
:type issuers: list[str] :param audiences: List of expected", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str self.video_name = kwargs['video_name']", "{'readonly': True}, 'principal_id': {'readonly': True}, } _attribute_map = { 'client_id':", "class SinkNodeBase(NodeBase): \"\"\"Base class for topology sink nodes. You probably", "_validation = { 'key_identifier': {'required': True}, 'current_key_identifier': {'readonly': True}, }", "\"\"\"Options for controlling the validation of TLS endpoints. :param ignore_hostname:", "job. Variables are only populated by the server, and will", "Value indicating whether or not there has ever been data", "by the owner of the service. Possible values include: \"Pending\",", "self, **kwargs ): super(VideoEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link", "kwargs.get('description', None) self.actions_required = kwargs.get('actions_required', None) class Properties(msrest.serialization.Model): \"\"\"Metric properties.", "location: Required. The geo-location where the resource lives. :type location:", "and audio to be stored as a file, and published", "to be applied when processing content for a particular outcome.", "VideoSink. All required parameters must be populated in order to", "filled by server. :type type: str \"\"\" _validation = {", "the given name is available. :type message: str \"\"\" _attribute_map", "include: \"Enabled\", \"Disabled\". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control:", "A message indicating if changes on the service provider require", "a secret string in order to prevent this value to", "self).__init__(**kwargs) self.name = None self.display_name = None self.display_description = None", "public network access is allowed for specified resources under the", "send to Azure. :param name: Required. The operation name. 
:type", "str self.iot_hub_name = kwargs['iot_hub_name'] self.device_id = kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model): \"\"\"The", "'type': '[NodeInput]'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'video_creation_properties': {'key': 'videoCreationProperties',", "_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key':", "= None self.parameters = kwargs.get('parameters', None) class PipelineTopology(ProxyResource): \"\"\"Pipeline topology", "current status of the storage account mapping. :vartype status: str", ":param authentication: Authentication method to be used when validating client", "enabled. :vartype enable_regional_mdm_account: bool :ivar source_mdm_account: The source MDM account.", "EndpointBase(msrest.serialization.Model): \"\"\"Base class for endpoints. You probably want to use", "the RTSP messages. Possible values include: \"Http\", \"Tcp\". :type transport:", "must be specified. Else if the mode is 'PreserveAspectRatio' then", "'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} }", "{'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, }", "LivePipelineOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status of an operation on", "blob to be provided to the Azure Video Analyzer IoT", "Optional[str] class EncoderCustomPreset(EncoderPresetBase): \"\"\"Describes a custom preset for encoding the", "be unique within the topology. :type name: str :param inputs:", "'value': {'key': 'value', 'type': '[EdgeModuleEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'},", "Validation keys are looked up based on the key id", "'str'}, 'name': {'key': 'name', 'type': 'str'}, 'transport': {'key': 'transport', 'type':", "class EdgeModuleEntity(ProxyResource): \"\"\"The representation of an edge module. Variables are", "video should be encoded. 
If omitted, encoder sets it automatically", "The current status of the Iot Hub mapping. :vartype status:", "~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types: list[str] :ivar", "{'key': 'tier', 'type': 'str'}, } def __init__( self, **kwargs ):", "} _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} } def __init__(", "__init__( self, **kwargs ): super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name = None self.status", "operation. :vartype status: str :ivar error: The error details for", "'operation': {'key': 'operation', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'},", "(read-only). Possible values include: \"Inactive\", \"Activating\", \"Active\", \"Deactivating\". :vartype state:", "be stored or exported to other destinations. Variables are only", "None # type: Optional[str] self.kid = kwargs['kid'] class EccTokenKey(TokenKey): \"\"\"Required", "str :ivar error: The error details for the pipeline job", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset':", "order to send to Azure. :param type: Required. The discriminator", "{'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'state':", "endpoints. :param ignore_hostname: When set to 'true' causes the certificate", "{'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections':", "= { 'type': {'required': True}, 'name': {'required': True}, 'endpoint': {'required':", "pipeline job will process content according to the pipeline topology", "archived content. 
Variables are only populated by the server, and", "def __init__( self, **kwargs ): super(EccTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EccTokenKey'", "self.node_name = kwargs['node_name'] class Operation(msrest.serialization.Model): \"\"\"An operation. All required parameters", "Azure Video Analyzer IoT edge module. All required parameters must", "default values and can later be defined in individual instances", "_attribute_map = { 'value': {'key': 'value', 'type': '[EdgeModuleEntity]'}, 'next_link': {'key':", "'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'}, } def __init__(", "It is recommended that this value is parameterized as a", "segments reduce the amount of storage transactions while increasing the", "a RTSP camera and archives the content can be reused", "None self.system_data = None class ProxyResource(Resource): \"\"\"The resource model definition", "are: EncoderCustomPreset, EncoderSystemPreset. All required parameters must be populated in", "**kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str", "= kwargs.get('description', None) self.type_properties_type = None self.flags = None self.content_urls", "not this class directly. Known sub-classes are: EncoderCustomPreset, EncoderSystemPreset. All", "\"Int\", \"Double\", \"Bool\". :type type: str or ~video_analyzer.models.ParameterType :param description:", "trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options: Validation options to use when authenticating", "for Private Endpoint. :vartype id: str \"\"\" _validation = {", ":param sources: List of the topology source nodes. Source nodes", "specified in the sequence. All required parameters must be populated", "of the storage account mapping. 
:vartype status: str \"\"\" _validation", "{'key': 'displayDescription', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'aggregation_type':", "\"\"\"Properties for expected token claims. All required parameters must be", "Default value is 30 seconds. This property is only allowed", "format (i.e. \"P1D\" equals 1 day) and can vary between", "up to 2048 characters long. :type description: str :param segment_length:", "= None # type: Optional[str] self.kid = kwargs['kid'] class EccTokenKey(TokenKey):", "): super(AccessPolicyEntity, self).__init__(**kwargs) self.role = kwargs.get('role', None) self.authentication = kwargs.get('authentication',", "be created on the service. :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param video_publishing_options:", "'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'edge_module_id': {'key':", "conjunction with the video content authorization token to expose a", "self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state = None self.parameters = kwargs.get('parameters',", "): super(JwtAuthentication, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str self.issuers", "any standard media player. It is available when the video", "self.name = kwargs['name'] self.display = kwargs.get('display', None) self.origin = kwargs.get('origin',", "'type', 'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, 'identity': {'key':", "= kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class VideoFlags(msrest.serialization.Model): \"\"\"Video", "message: Detailed reason why the given name is available. :type", "None) self.default = kwargs.get('default', None) class ParameterDefinition(msrest.serialization.Model): \"\"\"Defines the parameter", "or more data sources nodes such as an RTSP source", "controlling the validation of TLS endpoints. 
:param ignore_hostname: When set", "\"\"\"Access policies help define the authentication rules, and control access", "= kwargs.get('public_network_access', None) self.network_access_control = kwargs.get('network_access_control', None) self.provisioning_state = None", "\"\"\"The input parameters to generate registration token for the Azure", "reserved for the live pipeline. The allowed range is from", "endpoints associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption:", "} def __init__( self, **kwargs ): super(SourceNodeBase, self).__init__(**kwargs) self.type =", "of 10 years. For example, if this is set to", "class TimeSequenceBase(msrest.serialization.Model): \"\"\"A sequence of datetime ranges as a string.", "example, it can used to change the resolution from 4K", "live content can be automatically played by the Azure Video", "processing. When activated, this live pipeline will process content according", "True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'provisioning_state': {'readonly': True},", "DASH or HLS players by appending the following to the", "**kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = kwargs.get('provider', None) self.resource =", "be played in \"live mode\" with latencies which are approximately", "shoebox. :vartype to_be_exported_for_shoebox: bool \"\"\" _validation = { 'name': {'readonly':", "{'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data':", "server. :type type: str :param iot_hub_name: Required. Name of the", ".. code-block:: - HLSv4: /manifest(format=m3u8-aapl).m3u8 - HLS CMAF: /manifest(format=m3u8-cmaf) -", "description: str :param default: The default value for the parameter", "~datetime.datetime :ivar token: The content token value to be added", "\"RS256\", \"RS384\", \"RS512\". :type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo :param n:", "namespace. 
:vartype source_mdm_namespace: str :ivar supported_time_grain_types: The supported time grain", "Password to be presented as part of the credentials. It", "name. :type display: ~video_analyzer.models.OperationDisplay :param origin: Origin of the operation.", ":vartype blob_duration: str \"\"\" _validation = { 'name': {'readonly': True},", ":type operation: str :param description: The operation description. :type description:", "establishing the remote tunnel. This string is case-sensitive. :type device_id:", "primary storage account must be a Standard Storage account (either", "{'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'display_description':", "ID. :type id: str :param start_time: Operation start time. :type", "True}, } _attribute_map = { 'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},", "kwargs['has_data'] self.is_in_use = kwargs['is_in_use'] class VideoMediaInfo(msrest.serialization.Model): \"\"\"Contains information about the", "self).__init__(**kwargs) self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints", "**kwargs ): super(VideoEntity, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description =", "): super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description',", "= { 'name': {'key': 'name', 'type': 'str'}, 'value': {'key': 'value',", "} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'value':", "of trusted authorities should be used. :type trusted_certificates: ~video_analyzer.models.CertificateSource :param", "{'key': 'scale', 'type': 'VideoScale'}, } def __init__( self, **kwargs ):", "'TlsValidationOptions'}, } def __init__( self, **kwargs ): super(TlsEndpoint, self).__init__(**kwargs) self.type", "class TrackedResource(Resource): \"\"\"The resource model definition for an Azure Resource", "inputs: Required. 
An array of upstream node references within the", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'kid':", "Exported videos can be downloaded as MP4 files. Variables are", ":param identity: Required. The IoT Hub identity. :type identity: ~video_analyzer.models.ResourceIdentity", ":param value: Array of private link resources. :type value: list[~video_analyzer.models.PrivateLinkResource]", "have tags and a location. Variables are only populated by", "encoder processor. All required parameters must be populated in order", "'display_name': {'readonly': True}, 'display_description': {'readonly': True}, 'unit': {'readonly': True}, 'aggregation_type':", "{'key': 'systemData', 'type': 'SystemData'}, 'group_id': {'key': 'properties.groupId', 'type': 'str'}, 'required_members':", "{'readonly': True}, 'info': {'readonly': True}, } _attribute_map = { 'type':", "end time. :type end_time: str :param status: Operation status. :type", "which captures content from a RTSP camera and archives the", "self.large = kwargs.get('large', None) class VideoPublishingOptions(msrest.serialization.Model): \"\"\"Optional flags used to", "'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, } def __init__(", "~video_analyzer.models.UserAssignedManagedIdentity] \"\"\" _validation = { 'type': {'required': True}, } _attribute_map", "sub-classes are: RtspSource, VideoSource. All required parameters must be populated", "allows for encoding of the input content. For example, it", "later be defined in individual instances of the pipeline. :type", "of the credentials. It is recommended that this value is", "bool :param action_type: Indicates the action type. Possible values include:", "which share the same processing characteristics. For instance, a pipeline", "on the live pipeline. 
Variables are only populated by the", "True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key':", "'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, }", "topology. :type name: str :param value: Parameter value to be", "'token': {'key': 'token', 'type': 'str'}, } def __init__( self, **kwargs", "Required. The sequence of datetime ranges. Example: '[[\"2021-10-05T03:30:00Z\", \"2021-10-05T03:40:00Z\"]]'. :type", "* Sinks: list of one or more data sinks which", "with the video content authorization token to expose a WebSocket", "} _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'ranges':", "These will not take effect if the video already exists.", "TokenClaim(msrest.serialization.Model): \"\"\"Properties for expected token claims. All required parameters must", "the quality of the input video. :type bitrate_kbps: str :param", "createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar group_id: The", "state of the connection between service consumer and provider. :param", "is used. :type validation_options: ~video_analyzer.models.TlsValidationOptions \"\"\" _validation = { 'type':", "None class EdgeModuleEntityCollection(msrest.serialization.Model): \"\"\"A collection of EdgeModuleEntity items. :param value:", "include: \"SingleLayer_540p_H264_AAC\", \"SingleLayer_720p_H264_AAC\", \"SingleLayer_1080p_H264_AAC\", \"SingleLayer_2160p_H264_AAC\". :type name: str or ~video_analyzer.models.EncoderSystemPresetType", "Required. Value indicating whether or not the video is currently", "type: Required. The type of the endpoint. Possible values include:", "self.enable_regional_mdm_account = None self.source_mdm_account = None self.source_mdm_namespace = None self.supported_time_grain_types", "'description': {'key': 'description', 'type': 'str'}, } def __init__( self, **kwargs", "the token expiration date. 
:type expiration_date: ~datetime.datetime \"\"\" _validation =", ":param provider: The service provider. :type provider: str :param resource:", "lock_aggregation_type: The metric lock aggregation type. Possible values include: \"Average\",", "all encoder presets, which define the recipe or instructions on", "'VideoPreviewImageUrls'}, } def __init__( self, **kwargs ): super(VideoContentUrls, self).__init__(**kwargs) self.download_url", "been declared in the referenced topology. Topology parameters without a", "claims to be validated. Token must contains all claims and", "the Video Analyzer edge module. :vartype edge_module_id: str \"\"\" _validation", "to be valid. :type claims: list[~video_analyzer.models.TokenClaim] :param keys: List of", "when processing content for a particular outcome. The topology should", "location: str :param identity: The identities associated to the Video", "when validating client API access. :type authentication: ~video_analyzer.models.AuthenticationBase \"\"\" _validation", "the parameter to be used if the pipeline does not", "# type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) class AudioEncoderAac(AudioEncoderBase): \"\"\"A", "'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntityCollection, self).__init__(**kwargs)", "the encoder processor. All required parameters must be populated in", "is 'archive' and a live, low-latency feed is available from", "= None self.display_name = None self.blob_duration = None class MetricDimension(msrest.serialization.Model):", "30 seconds. This property is only allowed for topologies where", "error response for all Azure Resource Manager APIs to return", "including the key version. 
:vartype current_key_identifier: str \"\"\" _validation =", "{ 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} } def __init__( self,", "super(EncoderSystemPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str self.name =", "ID. :vartype client_id: str :ivar principal_id: The principal ID. :vartype", "**kwargs ): super(PipelineJob, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description =", "super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name = None self.status = None self.error =", "'type': 'str'}, 'certificates': {'key': 'certificates', 'type': '[str]'}, } def __init__(", "link resource Private link DNS zone name. :type required_zone_names: list[str]", "metric display name. :vartype display_name: str :ivar display_description: The metric", "{ 'value': {'key': 'value', 'type': '[AccessPolicyEntity]'}, 'next_link': {'key': '@nextLink', 'type':", "\"\"\"Base class for endpoints. You probably want to use the", "'type': 'str'}, } def __init__( self, **kwargs ): super(GroupLevelAccessControl, self).__init__(**kwargs)", "kwargs.get('parameters', None) class PipelineTopology(ProxyResource): \"\"\"Pipeline topology describes the processing steps", "self, **kwargs ): super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access = kwargs.get('public_network_access', None) class", "must be populated in order to send to Azure. :ivar", ":type width: str :param mode: Describes the video scaling mode", "# type: Optional[str] self.name = kwargs['name'] class ProcessorNodeBase(NodeBase): \"\"\"Base class", "'n', 'type': 'str'}, 'e': {'key': 'e', 'type': 'str'}, } def", "details. All required parameters must be populated in order to", "amount of storage transactions while increasing the archive playback latency.", "in order to send to Azure. :param key_identifier: Required. 
The", "string is case-sensitive. :type device_id: str \"\"\" _validation = {", "'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources', 'type':", ":type type: str or ~video_analyzer.models.ParameterType :param description: Description of the", "private endpoint connection resource. Possible values include: \"Succeeded\", \"Creating\", \"Deleting\",", "{'key': 'description', 'type': 'str'}, 'actions_required': {'key': 'actionsRequired', 'type': 'str'}, }", "for an Azure Resource Manager tracked top level resource which", "resource model definition for an Azure Resource Manager tracked top", ":type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess \"\"\" _attribute_map = { 'public_network_access':", "_attribute_map = { 'client_id': {'key': 'clientId', 'type': 'str'}, 'principal_id': {'key':", "the instance level parameter values for the user-defined topology parameters.", "instance of the Video Analyzer edge module. :vartype edge_module_id: str", "= kwargs.get('private_link_service_connection_state', None) self.provisioning_state = None class PrivateEndpointConnectionListResult(msrest.serialization.Model): \"\"\"List of", "'type': 'str'}, 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, } def __init__(", "None) self.error = kwargs.get('error', None) class VideoAnalyzerUpdate(msrest.serialization.Model): \"\"\"The update operation", "filled by server. :type type: str :param username: Required. Username", "SinkNodeBase(NodeBase): \"\"\"Base class for topology sink nodes. You probably want", "endpoint_url: The URL of the endpoint. :type endpoint_url: str :param", "None self.parameters = kwargs.get('parameters', None) class PipelineJobCollection(msrest.serialization.Model): \"\"\"A collection of", "populated in order to send to Azure. 
:param user_assigned_identity: Required.", "can used to change the resolution from 4K to 1280x720.", "'type': 'TunnelBase'}, } def __init__( self, **kwargs ): super(UnsecuredEndpoint, self).__init__(**kwargs)", "within the topology to be used as inputs for this", "'properties.state', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A set", "__init__( self, **kwargs ): super(VideoContentToken, self).__init__(**kwargs) self.expiration_date = None self.token", "'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self,", "self.segment_length = kwargs.get('segment_length', None) self.retention_period = kwargs.get('retention_period', None) class VideoEncoderBase(msrest.serialization.Model):", ":type type: str :param user_assigned_identities: The User Assigned Managed Identities.", "(for example https://vault/keys/mykey). :type key_identifier: str :ivar current_key_identifier: The current", "encoder preset, which defines the recipe or instructions on how", "'system_data': {'readonly': True}, 'state': {'readonly': True}, } _attribute_map = {", "{'key': 'issuers', 'type': '[str]'}, 'audiences': {'key': 'audiences', 'type': '[str]'}, 'claims':", "class TlsEndpoint(EndpointBase): \"\"\"TLS endpoint describes an endpoint that the pipeline", "characters long. :type title: str :param description: Optional video description", ":vartype system_data: ~video_analyzer.models.SystemData :param tags: A set of tags. Resource", "'SystemData'}, 'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'}, } def __init__( self,", "'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'private_endpoint': {'key':", "containing createdBy and modifiedBy information. 
:vartype system_data: ~video_analyzer.models.SystemData :param kind:", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} }", "self, **kwargs ): super(LivePipelineCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link", "to send to Azure. :param can_stream: Required. Value indicating whether", "self.unit = None self.aggregation_type = None self.lock_aggregation_type = None self.supported_aggregation_types", "'type': 'str'}, 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, } def __init__(", "parameter values for the user-defined topology parameters. A pipeline can", "metric to shoebox. :vartype to_be_exported_for_shoebox: bool \"\"\" _validation = {", "kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class PipelineTopologyUpdate(ProxyResource): \"\"\"Pipeline topology", "= { 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, 'ignore_signature': {'key': 'ignoreSignature',", "type: str :param user_assigned_identities: The User Assigned Managed Identities. :type", "kwargs.get('provider', None) self.resource = kwargs.get('resource', None) self.operation = kwargs.get('operation', None)", "representation of an edge module. Variables are only populated by", "} def __init__( self, **kwargs ): super(AccountEncryption, self).__init__(**kwargs) self.type =", "= { 'name': {'required': True}, 'tier': {'readonly': True}, } _attribute_map", "True}, 'state': {'readonly': True}, } _attribute_map = { 'id': {'key':", "None) self.reason = kwargs.get('reason', None) self.message = kwargs.get('message', None) class", "def __init__( self, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = kwargs['name']", "= { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'blob_duration': {'readonly':", "audience is valid if it matches at least one of", "ignored when sending a request. 
:ivar log_specifications: List of log", "to be validated. Token must contains all claims and respective", "'ingestion', 'type': 'GroupLevelAccessControl'}, 'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'}, } def", "the expected use of the topology to be described here.", "generated by Microsoft (R) AutoRest Code Generator. # Changes may", "include: \"SystemKey\", \"CustomerKey\". :type type: str or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties:", "{'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, }", "order to send to Azure. :param key_identifier: Required. The URL", "{'key': 'properties.contentUrls', 'type': 'VideoContentUrls'}, 'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'}, 'archival':", "collection of information about the state of the connection between", "{'key': 'systemData', 'type': 'SystemData'}, } def __init__( self, **kwargs ):", "key_vault_properties: The properties of the key used to encrypt the", "processing characteristics. For instance, a pipeline topology which captures content", ":ivar type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or", "an input signal to be used on a pipeline node.", "the processing steps to be applied when processing content for", "then no content is archived. :type video_name: str :param video_creation_properties:", "Value indicating whether or not the video is currently being", "Required. The name of the upstream node in the pipeline", "not this class directly. Known sub-classes are: RtspSource, VideoSource. All", "\"disableArchive\" must be set to 'false'. :type disable_rtsp_publishing: str \"\"\"", "Video Analyzer edge module. :vartype edge_module_id: str \"\"\" _validation =", "stored or exported to other destinations. Variables are only populated", "this policy. Possible values include: \"Reader\". :type role: str or", "is available. 
:type message: str \"\"\" _attribute_map = { 'name_available':", "{'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url':", "~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The metric lock aggregation type. Possible values", "source, and if disableArchive is set to true, then no", "using HTTP, the RTSP messages are exchanged through long lived", ":vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections", "to be analyzed, processed or transformed. :type processors: list[~video_analyzer.models.ProcessorNodeBase] :param", "data analysis or transformations. * Sinks: list of one or", "ignore_signature: When set to 'true' causes the certificate chain trust", "reused across many different cameras, as long as the same", "RTSP source, and if disableArchive is set to true, then", "VideoAnalyzer items. :type value: list[~video_analyzer.models.VideoAnalyzer] \"\"\" _attribute_map = { 'value':", "message indicating if changes on the service provider require any", ":param origin: Origin of the operation. :type origin: str :param", "This contains the required information for Video Analyzer to connect", "needs to be checked. :type name: str :param type: The", "kind: Required. Topology kind. Possible values include: \"Live\", \"Batch\". :type", "= { 'can_stream': {'key': 'canStream', 'type': 'bool'}, 'has_data': {'key': 'hasData',", "'type': '{str}'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts',", "by appending the following to the base URL: .. code-block::", "kwargs.get('encryption', None) self.iot_hubs = kwargs.get('iot_hubs', None) self.public_network_access = kwargs.get('public_network_access', None)", "Medium resolution preview image URL. 
:type medium: str :param large:", "kwargs['id'] self.identity = kwargs.get('identity', None) self.status = None class SystemData(msrest.serialization.Model):", "requests. :type password: str \"\"\" _validation = { 'type': {'required':", "None # type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) class AudioEncoderAac(AudioEncoderBase):", "The metric display description. :vartype display_description: str :ivar unit: The", "~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for this resource. :type", "client API access. :type authentication: ~video_analyzer.models.AuthenticationBase \"\"\" _validation = {", "value: list[~video_analyzer.models.LivePipeline] :param next_link: A link to the next page", "the chosen video segment length. It is available when the", "is used, for example, when the topology is used only", "Parameters: list of user defined parameters that can be references", "= { 'id': {'required': True}, 'status': {'readonly': True}, } _attribute_map", "title: Optional title provided by the user. Value can be", "Describes the video scaling mode to be applied. Default mode", "'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'},", ":param private_endpoint: The resource of private end point. :type private_endpoint:", "'expiration': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map = {", "str :param time_sequences: Required. Describes a sequence of datetime ranges.", "True}, 'inputs': {'required': True}, 'preset': {'required': True}, } _attribute_map =", "types. :vartype supported_time_grain_types: list[str] \"\"\" _validation = { 'name': {'readonly':", "The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\". :vartype", "log category name. :vartype name: str :ivar display_name: The diagnostic", "audiences: list[str] :param claims: List of additional token claims to", "pipeline topology. 
:type name: str :param value: Parameter value to", ":type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources: List of the topology source", "to a single video. :vartype token: str \"\"\" _validation =", "'type': 'VideoScale'}, } def __init__( self, **kwargs ): super(VideoEncoderH264, self).__init__(**kwargs)", "Vault. Variables are only populated by the server, and will", "None) self.resource = kwargs.get('resource', None) self.operation = kwargs.get('operation', None) self.description", "\"Microsoft.Storage/storageAccounts\". :vartype type: str :ivar system_data: Azure Resource Manager metadata", "specifications. :vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] \"\"\" _validation = { 'log_specifications': {'readonly':", "last_modified_at: The timestamp of resource last modification (UTC). :type last_modified_at:", "{'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type':", "this class directly. Known sub-classes are: JwtAuthentication. All required parameters", "{'readonly': True}, 'additional_info': {'readonly': True}, } _attribute_map = { 'code':", "{'required': True}, 'device_id': {'required': True}, } _attribute_map = { 'type':", "{ 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'to_be_exported_for_shoebox': {'readonly': True},", "geo-location where the resource lives. :type location: str :param identity:", "bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which", "individual instances of the pipeline. :type parameters: list[~video_analyzer.models.ParameterDeclaration] :param sources:", "'SystemData'}, 'title': {'key': 'properties.title', 'type': 'str'}, 'description': {'key': 'properties.description', 'type':", "IoT Hub device information. All required parameters must be populated", "Allowed values are 96, 112, 128, 160, 192, 224, and", "Required. 
Expected value of the claim to be present on", "_attribute_map = { 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def", "Internet prior to the token expiration date. :type expiration_date: ~datetime.datetime", "certificates. All required parameters must be populated in order to", "be used to validate access tokens. Having multiple keys allow", "self.parameters = kwargs.get('parameters', None) class LivePipelineCollection(msrest.serialization.Model): \"\"\"A collection of LivePipeline", "\"\"\" _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetail'}, }", "'true', then \"disableArchive\" must be set to 'false'. :type disable_rtsp_publishing:", "created_by_type: str or ~video_analyzer.models.CreatedByType :param created_at: The timestamp of resource", "__init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name", "of resource last modification (UTC). :type last_modified_at: ~datetime.datetime \"\"\" _attribute_map", ":type ignore_hostname: str :param ignore_signature: When set to 'true' causes", "super(VideoContentToken, self).__init__(**kwargs) self.expiration_date = None self.token = None class VideoContentUrls(msrest.serialization.Model):", "__init__( self, **kwargs ): super(LogSpecification, self).__init__(**kwargs) self.name = None self.display_name", "{'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'}, 'archival': {'key': 'properties.archival', 'type': 'VideoArchival'}, }", "_attribute_map = { 'disable_archive': {'key': 'disableArchive', 'type': 'str'}, 'disable_rtsp_publishing': {'key':", "to be added to the video content URL as the", "this URL can be used in conjunction with the video", "= { 'name': {'required': True}, } _attribute_map = { 'name':", "validation properties for tokens generated with RSA algorithm. 
All required", "**kwargs ): super(EncoderCustomPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str", "may either be versioned (for example https://vault/keys/mykey/version1) or reference a", "member names. :vartype required_members: list[str] :param required_zone_names: The private link", "or not the video is currently being referenced be an", "'content_urls': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", "module must be initialized and connected to the Internet prior", "def __init__( self, **kwargs ): super(TrackedResource, self).__init__(**kwargs) self.tags = kwargs.get('tags',", "across all the cameras. Individual instance properties can be defined", "True}, 'expiration': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map =", "self.credentials = kwargs['credentials'] self.url = kwargs['url'] self.tunnel = kwargs.get('tunnel', None)", "str :ivar to_be_exported_for_shoebox: Whether to export metric to shoebox. :vartype", "**kwargs ): super(TokenKey, self).__init__(**kwargs) self.type = None # type: Optional[str]", "authentication: Authentication method to be used when validating client API", "the pipeline job operation. :vartype name: str :ivar status: The", "not this class directly. Known sub-classes are: JwtAuthentication. All required", ":type name_available: bool :param reason: The reason why the given", "be used in case a new video resource needs to", "can be referenced throughout the topology nodes through the use", "**kwargs ): super(CertificateSource, self).__init__(**kwargs) self.type = None # type: Optional[str]", "more data sinks which allow for data to be stored", "'@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'}", "Possible values include: \"Live_S1\", \"Batch_S1\". 
:type name: str or ~video_analyzer.models.SkuName", "self.status = None class JwtAuthentication(AuthenticationBase): \"\"\"Properties for access validation based", "to use the sub-classes and not this class directly. Known", "\"Succeeded\", \"Creating\", \"Deleting\", \"Failed\". :vartype provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState \"\"\"", "True}, 'flags': {'readonly': True}, 'content_urls': {'readonly': True}, } _attribute_map =", "= kwargs.get('resource', None) self.operation = kwargs.get('operation', None) self.description = kwargs.get('description',", "'deviceId', 'type': 'str'}, } def __init__( self, **kwargs ): super(SecureIotDeviceRemoteTunnel,", "str :param iot_hub_name: Required. Name of the IoT Hub. :type", "media player. It is available when the video type is", "\"Failed\". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time", ":ivar info: The additional info. :vartype info: any \"\"\" _validation", "def __init__( self, **kwargs ): super(PipelineTopology, self).__init__(**kwargs) self.kind = kwargs['kind']", "details of the user assigned managed identity used by the", "RTSP source which allows for content to be ingested from", "self.sinks = kwargs.get('sinks', None) class PipelineTopologyCollection(msrest.serialization.Model): \"\"\"A collection of PipelineTopology", "self, **kwargs ): super(VideoSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type:", "player widget. 
Alternatively, this URL can be used in conjunction", "self, **kwargs ): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname', None) self.ignore_signature", "'y': {'key': 'y', 'type': 'str'}, } def __init__( self, **kwargs", "built-in preset for encoding the input content using the encoder", "'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} } def __init__( self, **kwargs", "'kid': {'key': 'kid', 'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'},", "Key Vault. Variables are only populated by the server, and", "characters long. :type description: str :ivar type_properties_type: Video content type.", "'[str]'}, } def __init__( self, **kwargs ): super(PrivateLinkResource, self).__init__(**kwargs) self.group_id", "PipelineTopology(ProxyResource): \"\"\"Pipeline topology describes the processing steps to be applied", "\"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\". :vartype type: str :ivar system_data: Azure Resource", "True}, 'location': {'required': True}, } _attribute_map = { 'id': {'key':", "'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type',", "a Azure Resource Manager proxy resource. It will not have", "between IoT edge module and the cloud. After the initial", "this class directly. Known sub-classes are: PemCertificateList. All required parameters", "'type': {'key': 'type', 'type': 'str'}, 'info': {'key': 'info', 'type': 'object'},", "{'key': 'value', 'type': '[LivePipeline]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, }", "{'key': 'n', 'type': 'str'}, 'e': {'key': 'e', 'type': 'str'}, }", "kwargs.get('type', None) class CheckNameAvailabilityResponse(msrest.serialization.Model): \"\"\"The check availability result. 
:param name_available:", "equals 1 day) and can vary between 1 day to", "different values, such as individual cameras' RTSP endpoints and credentials.", "TimeSequenceBase(msrest.serialization.Model): \"\"\"A sequence of datetime ranges as a string. You", "only with batch pipelines. All required parameters must be populated", "to return error details for failed operations. (This also follows", "super(TimeSequenceBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class TlsEndpoint(EndpointBase):", "how the video sink publishes content via the video resource.", "= { 'type': {'key': '@type', 'type': 'str'}, 'iot_hub_name': {'key': 'iotHubName',", "the input video. :type scale: ~video_analyzer.models.VideoScale \"\"\" _validation = {", "{'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication':", "height must be specified. Else if the mode is 'PreserveAspectRatio'", "VideoContentToken(msrest.serialization.Model): \"\"\"\"Video content token grants access to the video content", "self, **kwargs ): super(VideoSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type:", "tags. Resource tags. :type tags: dict[str, str] :param location: Required.", "date of the registration token. The Azure Video Analyzer IoT", "status of the live pipeline operation. :vartype status: str :ivar", "'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'} } def __init__( self, **kwargs ): super(VideoEncoderBase,", "log specifications. :vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List of metric", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str self.iot_hub_name = kwargs['iot_hub_name']", "encoding presets, which define the recipe or instructions on how", "None class ErrorDetail(msrest.serialization.Model): \"\"\"The error detail. 
Variables are only populated", "'AuthenticationBase'}, } def __init__( self, **kwargs ): super(AccessPolicyEntity, self).__init__(**kwargs) self.role", "'small': {'key': 'small', 'type': 'str'}, 'medium': {'key': 'medium', 'type': 'str'},", "~video_analyzer.models.PublicNetworkAccess \"\"\" _attribute_map = { 'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},", "reason why the given name is available. :type message: str", "Credentials to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase", "job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of the", "} _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message':", "24 hours or less. Currently, there can be only one", "'[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control': {'key': 'properties.networkAccessControl', 'type':", "'videoName', 'type': 'str'}, 'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'}, 'video_publishing_options': {'key':", "'str'}, 'default': {'key': 'default', 'type': 'str'}, } def __init__( self,", "preview image URLs. These URLs can be used in conjunction", "= { 'type': {'required': True}, 'credentials': {'required': True}, 'url': {'required':", "state. :vartype flags: ~video_analyzer.models.VideoFlags :ivar content_urls: Set of URLs to", "diagnostic log emitted by service. Variables are only populated by", ":ivar group_id: The private link resource group id. :vartype group_id:", ":type action_type: str or ~video_analyzer.models.ActionType \"\"\" _validation = { 'name':", "to a maximum of 10 years. For example, if this", "{'required': True}, 'status': {'readonly': True}, } _attribute_map = { 'id':", "of RTSP source, and if disableArchive is set to true,", "password: Required. 
Password to be presented as part of the", "'inputs', 'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'}", "images are enabled. :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls \"\"\" _attribute_map = {", ":vartype display_name: str :ivar to_be_exported_for_shoebox: Whether to export metric to", "state: str or ~video_analyzer.models.LivePipelineState :param parameters: List of the instance", "**kwargs ): super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications = None self.metric_specifications = None", "nodes. * Sources: list of one or more data sources", "= kwargs.get('audio_encoder', None) self.video_encoder = kwargs.get('video_encoder', None) class NodeBase(msrest.serialization.Model): \"\"\"Base", "not this class directly. Known sub-classes are: VideoSequenceAbsoluteTimeMarkers. All required", "be ignored when sending a request. :ivar client_id: The client", "\"Bytes\", \"Count\", \"Milliseconds\". :vartype unit: str or ~video_analyzer.models.MetricUnit :ivar aggregation_type:", "'type': 'str'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts',", "the value for the \"token\" query string parameter. The token", "references across the topology nodes. * Sources: list of one", "be ignored when sending a request. :ivar type: The additional", "for this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints", "at least one of the given values. :type audiences: list[str]", "for ingestion group. 
:type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption: Public network", "endpoint that the pipeline can connect to over TLS transport", "Video Analyzer video resource to be ingested into a pipeline.", ":type preset: ~video_analyzer.models.EncoderPresetBase \"\"\" _validation = { 'type': {'required': True},", "actions_required: str \"\"\" _attribute_map = { 'status': {'key': 'status', 'type':", "True}, 'system_data': {'readonly': True}, 'location': {'required': True}, 'endpoints': {'readonly': True},", "): super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type", "credential objects. You probably want to use the sub-classes and", "'EncoderPresetBase'}, } def __init__( self, **kwargs ): super(EncoderProcessor, self).__init__(**kwargs) self.type", "__init__( self, **kwargs ): super(ParameterDefinition, self).__init__(**kwargs) self.name = kwargs['name'] self.value", "of the given values. :type issuers: list[str] :param audiences: List", "True}, 'type': {'required': True}, } _attribute_map = { 'name': {'key':", "follow IS08601, and the sum of the ranges should add", "public certificates. One certificate per entry. :type certificates: list[str] \"\"\"", "checking to see if the camera bitrate is now below", "def __init__( self, **kwargs ): super(PipelineJob, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name',", "file download URL. This URL can be used in conjunction", "str self.transport = kwargs.get('transport', None) self.endpoint = kwargs['endpoint'] class TunnelBase(msrest.serialization.Model):", "end_time: Operation end time. :type end_time: str :param status: Operation", "class directly. Known sub-classes are: PemCertificateList. All required parameters must", "{'key': 'keyIdentifier', 'type': 'str'}, 'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'}, }", "\"Http\", \"Tcp\". 
:type transport: str or ~video_analyzer.models.RtspTransport :param endpoint: Required.", "server. :type type: str :param bitrate_kbps: The maximum bitrate, in", "items. :type value: list[~video_analyzer.models.VideoAnalyzer] \"\"\" _attribute_map = { 'value': {'key':", "MetricSpecification(msrest.serialization.Model): \"\"\"A metric emitted by service. Variables are only populated", "{'key': 'display', 'type': 'OperationDisplay'}, 'origin': {'key': 'origin', 'type': 'str'}, 'properties':", ":type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption: Public network access for consumption", "properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for", "None) self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None) self.provisioning_state = None class PrivateEndpointConnectionListResult(msrest.serialization.Model):", "kwargs.get('parameters', None) self.sources = kwargs.get('sources', None) self.processors = kwargs.get('processors', None)", "\"PT30S\" equals 30 seconds) and can vary between 30 seconds", "topology describes the processing steps to be applied when processing", "Required. The endpoint URL for Video Analyzer to connect to.", "None) class PemCertificateList(CertificateSource): \"\"\"A list of PEM formatted certificates. All", "\"Failed\". :vartype provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState \"\"\" _validation = {", "class NodeInput(msrest.serialization.Model): \"\"\"Describes an input signal to be used on", "= kwargs['type'] self.key_vault_properties = kwargs.get('key_vault_properties', None) self.identity = kwargs.get('identity', None)", "include: \"Enabled\", \"Disabled\". 
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess \"\"\" _attribute_map", "{'key': 'origin', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'Properties'}, 'is_data_action':", "endpoint connection operation. All required parameters must be populated in", "'type': 'str'}, } def __init__( self, **kwargs ): super(EccTokenKey, self).__init__(**kwargs)", "items. :param value: A collection of VideoAnalyzer items. :type value:", "'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(PipelineJobUpdate, self).__init__(**kwargs)", "provider: The service provider. :type provider: str :param resource: Resource", "= { 'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'} } def __init__( self, **kwargs", "'type': 'VideoArchival'}, } def __init__( self, **kwargs ): super(VideoEntity, self).__init__(**kwargs)", "= kwargs['id'] self.identity = kwargs['identity'] self.status = None class JwtAuthentication(AuthenticationBase):", "\"\"\"Details about the error for a failed pipeline job. :param", "str :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for", "within the topology. :type name: str :param transport: Network transport", "by server. :type type: str :param audio_encoder: Describes a custom", "self.has_data = kwargs['has_data'] self.is_in_use = kwargs['is_in_use'] class VideoMediaInfo(msrest.serialization.Model): \"\"\"Contains information", "\"\"\"A custom preset for encoding audio with the AAC codec.", "= { 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} } def __init__( self, **kwargs", "individual video files (segments) which are persisted to storage. Smaller", "for the parameter to be used if the pipeline does", "Analyzer to connect to. This contains the required information for", "relies on tables, queues, and blobs. The primary storage account", "definition. 
:type topology_name: str :param description: An optional description for", "be populated in order to send to Azure. :ivar id:", "expected token audiences. Token audience is valid if it matches", "{'required': True}, 'n': {'required': True}, 'e': {'required': True}, } _attribute_map", ":type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes a custom preset for", "'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, 'target': {'key':", "str :param ignore_signature: When set to 'true' causes the certificate", "to Azure. :param type: Required. The identity type. :type type:", "self).__init__(**kwargs) self.key_identifier = kwargs['key_identifier'] self.current_key_identifier = None class ListProvisioningTokenInput(msrest.serialization.Model): \"\"\"The", "being referenced be an active pipeline. The fact that is", "): super(VideoEncoderH264, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str class", "\"\"\" _validation = { 'service_specification': {'readonly': True}, } _attribute_map =", "request. :ivar name: The name of the pipeline job operation.", "**kwargs ): super(UnsecuredEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str", "created under Video Analyzer account. :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation", "days will be periodically deleted. This value can be updated", "values should follow IS08601, and the sum of the ranges", "'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } _subtype_map = { 'type':", "in a video of type 'archive'. If used in a", ":param action_type: Indicates the action type. Possible values include: \"Internal\".", "streaming URL. The live content can be automatically played by", "when accessing a resource. 
All required parameters must be populated", "'properties.role', 'type': 'str'}, 'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'}, } def", "'archival': {'key': 'properties.archival', 'type': 'VideoArchival'}, } def __init__( self, **kwargs", "include: \"Processing\", \"Canceled\", \"Completed\", \"Failed\". :vartype state: str or ~video_analyzer.models.PipelineJobState", "the recipe or instructions on how the input video should", "= kwargs.get('parameters', None) class PipelineTopology(ProxyResource): \"\"\"Pipeline topology describes the processing", "token. :type value: str \"\"\" _validation = { 'name': {'required':", "of additional token claims to be validated. Token must contains", "'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'tags': {'key':", "The type of identity that created the resource. Possible values", "deleted from your account. :vartype expiration: ~datetime.datetime :ivar error: Details", "None) self.description = kwargs.get('description', None) class ParameterDeclaration(msrest.serialization.Model): \"\"\"Single topology parameter", "lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types. :type", "kwargs['identity'] self.status = None class JwtAuthentication(AuthenticationBase): \"\"\"Properties for access validation", "type: Required. The discriminator for derived types.Constant filled by server.", "__init__( self, **kwargs ): super(PipelineTopology, self).__init__(**kwargs) self.kind = kwargs['kind'] self.sku", "enabled. :param small: Low resolution preview image URL. :type small:", "expose a WebSocket tunneled RTSP stream. 
It is available when", "downstream of RTSP source, and if disableArchive is set to", "= { 'node_name': {'required': True}, } _attribute_map = { 'node_name':", "omitted, the encoder uses the average frame rate of the", "} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type':", "the encoder uses the average frame rate of the input", "to connect to. :type url: str :param tunnel: Describes the", "unique instance of a live topology, used for real-time ingestion,", "None) class LogSpecification(msrest.serialization.Model): \"\"\"A diagnostic log emitted by service. Variables", "recent still image from the video archive in different resolutions.", "True}, 'unit': {'readonly': True}, 'aggregation_type': {'readonly': True}, 'lock_aggregation_type': {'readonly': True},", "**kwargs ): super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.type =", "example, when the topology is used only for low latency", "analyzed, processed or transformed. 
:type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List", "kwargs.get('name_available', None) self.reason = kwargs.get('reason', None) self.message = kwargs.get('message', None)", "None) self.message = kwargs.get('message', None) class PipelineJobOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking", "def __init__( self, **kwargs ): super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications = None", "{'required': True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type':", "'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} }", "'scale': {'key': 'scale', 'type': 'VideoScale'}, } _subtype_map = { 'type':", "'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'preset': {'key': 'preset', 'type': 'EncoderPresetBase'},", "**kwargs ): super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date = None self.token = None", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param kind: Topology kind.", "include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\". :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType", "kwargs['name'] self.value = kwargs.get('value', None) class PemCertificateList(CertificateSource): \"\"\"A list of", "height: str :param width: The desired output video width. :type", "'GroupLevelAccessControl'}, 'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'}, } def __init__( self,", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str class RtspSource(SourceNodeBase): \"\"\"RTSP", "when authenticating a TLS connection. A null list designates that", "are intended to be kept in storage. It must be", "capacity. Doing so will ensure that one 'noisy neighbor' does", "used when validating client API access. 
:type authentication: ~video_analyzer.models.AuthenticationBase \"\"\"", "keys which can be used to validate access tokens. Having", "the following to the base URL: .. code-block:: - HLSv4:", "blobs. The primary storage account must be a Standard Storage", "video can be streamed. Only \"archive\" type videos can be", "} _attribute_map = { 'can_stream': {'key': 'canStream', 'type': 'bool'}, 'has_data':", "authentication methods. You probably want to use the sub-classes and", "the RTP packages are interleaved in the HTTP connections alongside", "description: The operation description. :type description: str \"\"\" _attribute_map =", "Video Analyzer account, including the key version. :vartype current_key_identifier: str", "self.status = None self.error = None class PipelineJobUpdate(ProxyResource): \"\"\"Pipeline job", "\"\"\"A diagnostic log emitted by service. Variables are only populated", "the recipe or instructions on how the input content should", "= { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display',", "**kwargs ): super(PipelineJobUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description =", "str \"\"\" _validation = { 'client_id': {'readonly': True}, 'principal_id': {'readonly':", "description: str :param segment_length: Segment length indicates the length of", "service provider require any updates on the consumer. :type actions_required:", "archiving is enabled, this results in a video of type", "import msrest.serialization class Resource(msrest.serialization.Model): \"\"\"Common fields that are returned in", "{'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, }", "__init__( self, **kwargs ): super(ParameterDeclaration, self).__init__(**kwargs) self.name = kwargs['name'] self.type", "values include: \"User\", \"Application\", \"ManagedIdentity\", \"Key\". 
:type created_by_type: str or", "~video_analyzer.models.EndpointBase \"\"\" _validation = { 'type': {'required': True}, 'name': {'required':", "system_data: ~video_analyzer.models.SystemData :param kind: Required. Topology kind. Possible values include:", "video resource to be ingested into a pipeline. Currently supported", "super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access = kwargs.get('public_network_access', None) class IotHub(msrest.serialization.Model): \"\"\"The IoT", "types.Constant filled by server. :type type: str :param bitrate_kbps: The", "then both width and height must be specified. Else if", "'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } _subtype_map =", ":param video_creation_properties: Optional video properties to be used in case", "tracking the status of an operation on the pipeline job.", "Required. Name of the parameter. :type name: str :param type:", "'TunnelBase'}, } def __init__( self, **kwargs ): super(UnsecuredEndpoint, self).__init__(**kwargs) self.type", "set to \"live\". :type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions \"\"\" _validation = {", "the ranges should add up to 24 hours or less.", "Azure. :param type: Required. The discriminator for derived types.Constant filled", "__init__( self, **kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type = None self.info", "state of the connection between service consumer and provider. :type", "or ~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed reason why the given name", "string. You probably want to use the sub-classes and not", "be processed. 
:type preset: ~video_analyzer.models.EncoderPresetBase \"\"\" _validation = { 'type':", "recommended that the expected use of the topology to be", "'type': 'str'}, } def __init__( self, **kwargs ): super(VideoCreationProperties, self).__init__(**kwargs)", "have optional default values and can later be defined in", "Hub mapping. :vartype status: str \"\"\" _validation = { 'id':", "'str'}, } def __init__( self, **kwargs ): super(Operation, self).__init__(**kwargs) self.name", "__init__( self, **kwargs ): super(UnsecuredEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' #", "kwargs['e'] class SourceNodeBase(NodeBase): \"\"\"Base class for topology source nodes. You", "= { 'height': {'key': 'height', 'type': 'str'}, 'width': {'key': 'width',", "custom preset for encoding audio. :type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder:", "'ServiceSpecification'}, } def __init__( self, **kwargs ): super(Properties, self).__init__(**kwargs) self.service_specification", "list[str] \"\"\" _validation = { 'type': {'required': True}, 'certificates': {'required':", "2048 characters long. :type description: str :param segment_length: Segment length", "resource. It will not have tags and a location. Variables", "{'key': 'displayName', 'type': 'str'}, 'blob_duration': {'key': 'blobDuration', 'type': 'str'}, }", "~video_analyzer.models.VideoContentUrls :param media_info: Contains information about the video and audio", "'type': 'str'}, 'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'}, 'preview_image_urls': {'key': 'previewImageUrls',", "Smaller segments provide lower archive playback latency but generate larger", "'type': 'str'}, } def __init__( self, **kwargs ): super(VideoMediaInfo, self).__init__(**kwargs)", "{ 'status': {'key': 'status', 'type': 'str'}, 'description': {'key': 'description', 'type':", "of the Video Analyzer edge module. 
:vartype edge_module_id: str \"\"\"", "self, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = kwargs.get('error', None) class", "= kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): \"\"\"Status", "= '#Microsoft.VideoAnalyzer.VideoSink' # type: str self.video_name = kwargs['video_name'] self.video_creation_properties =", "self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str self.inputs = kwargs['inputs']", "skipped. Default is 'false'. :type ignore_signature: str \"\"\" _attribute_map =", "super(EccTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str self.alg =", "str or ~video_analyzer.models.EncoderSystemPresetType \"\"\" _validation = { 'type': {'required': True},", "'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} }", "content processing. When activated, this live pipeline will process content", "= None class JwtAuthentication(AuthenticationBase): \"\"\"Properties for access validation based on", "long. :type description: str :ivar type_properties_type: Video content type. Different", "\"\"\"A list of private link resources. :param value: Array of", "for accessing the encryption keys in Key Vault. Variables are", "and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param title: Optional video", "__init__( self, **kwargs ): super(NodeBase, self).__init__(**kwargs) self.type = None #", "information. All required parameters must be populated in order to", "): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id', None)", "'name': {'required': True}, 'inputs': {'required': True}, 'preset': {'required': True}, }", "of the topology source nodes. 
Source nodes enable external data", "'kid', 'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'n': {'key':", "expiration_date: Required. The desired expiration date of the registration token.", "'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} } def __init__( self, **kwargs", "self, **kwargs ): super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications = None self.metric_specifications =", "URL: .. code-block:: - HLSv4: /manifest(format=m3u8-aapl).m3u8 - HLS CMAF: /manifest(format=m3u8-cmaf)", ":type last_modified_at: ~datetime.datetime \"\"\" _attribute_map = { 'created_by': {'key': 'createdBy',", "been data recorded or uploaded into the video. Newly created", "the next page of the collection (when the collection contains", "Required. Value indicating whether or not the video can be", "} def __init__( self, **kwargs ): super(VideoEntityCollection, self).__init__(**kwargs) self.value =", "'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'},", "class ParameterDefinition(msrest.serialization.Model): \"\"\"Defines the parameter value of an specific pipeline", "__init__( self, **kwargs ): super(VideoEncoderBase, self).__init__(**kwargs) self.type = None #", "None self.metric_specifications = None class SinkNodeBase(NodeBase): \"\"\"Base class for topology", "str self.preset = kwargs['preset'] class EncoderSystemPreset(EncoderPresetBase): \"\"\"Describes a built-in preset", "range for requests in each blob. :vartype blob_duration: str \"\"\"", "\"\"\" _validation = { 'type': {'required': True}, 'certificates': {'required': True},", "'str'}, 'certificates': {'key': 'certificates', 'type': '[str]'}, } def __init__( self,", "disabling low latency streaming. This is used, for example, when", "Kbps in increments of 100 Kbps. 
If the RTSP camera", "'name': {'required': True}, 'tier': {'readonly': True}, } _attribute_map = {", "The type of key used to encrypt the Account Key.", "is set to \"live\". :param disable_archive: When set to 'true'", "derived types.Constant filled by server. :type type: str :param bitrate_kbps:", "used to encrypt the account. The key may either be", "'time_sequences': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "= kwargs.get('endpoint_url', None) self.type = kwargs['type'] class EndpointBase(msrest.serialization.Model): \"\"\"Base class", "operation. :type origin: str :param properties: Operation properties format. :type", "'type': 'str'}, } def __init__( self, **kwargs ): super(VideoPreviewImageUrls, self).__init__(**kwargs)", "EncoderProcessor. All required parameters must be populated in order to", "pipeline. :type description: str :param bitrate_kbps: Maximum bitrate capacity in", "RTSP cameras and/or generic RTSP servers. :type endpoint: ~video_analyzer.models.EndpointBase \"\"\"", "at which video should be encoded. If omitted, encoder sets", "based on JSON Web Tokens (JWT). All required parameters must", "may cause incorrect behavior and will be lost if the", "{'key': 'details', 'type': '[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, }", "metric name. 
:vartype name: str :ivar display_name: The metric display", "provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState \"\"\" _validation = { 'id': {'readonly':", "def __init__( self, **kwargs ): super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint = kwargs.get('private_endpoint',", "The date-time by when this pipeline job will be automatically", ":type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The reason for", "'displayName', 'type': 'str'}, 'blob_duration': {'key': 'blobDuration', 'type': 'str'}, } def", "available when the video type is 'file' and video file", "by the Azure Video Analyzer player widget. Alternatively, this URL", "Detailed reason why the given name is available. :type message:", "(in frames per second) of the encoded video. The value", "super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None)", "'type': {'key': '@type', 'type': 'str'}, 'certificates': {'key': 'certificates', 'type': '[str]'},", "connection between service consumer and provider. :type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar", "archiving content. Default is 'false'. If set to 'true', then", "the live pipeline operation. :vartype status: str :ivar error: The", "'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username = kwargs['username'] self.password = kwargs['password']", "None # type: Optional[str] self.credentials = kwargs['credentials'] self.url = kwargs['url']", "generated with RSA algorithm. All required parameters must be populated", "None) self.provisioning_state = None self.private_endpoint_connections = None class VideoAnalyzerCollection(msrest.serialization.Model): \"\"\"A", "The error additional info. 
:vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] \"\"\" _validation =", "def __init__( self, **kwargs ): super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name = kwargs.get('name',", "# type: Optional[str] class EncoderCustomPreset(EncoderPresetBase): \"\"\"Describes a custom preset for", "live pipeline. The allowed range is from 500 to 3000", ":type supported_aggregation_types: list[str] :ivar dimensions: The metric dimensions. :vartype dimensions:", "'type': {'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'},", "'properties': {'key': 'properties', 'type': 'Properties'}, 'is_data_action': {'key': 'isDataAction', 'type': 'bool'},", "the project root for license information. # Code generated by", "{'key': 'properties.state', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, }", "'value', 'type': '[LivePipeline]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def", "date-time by when this pipeline job will be automatically deleted", "the built-in encoding preset. Possible values include: \"SingleLayer_540p_H264_AAC\", \"SingleLayer_720p_H264_AAC\", \"SingleLayer_1080p_H264_AAC\",", "be defined in individual instances of the pipeline. :type parameters:", "= { 'service_specification': {'readonly': True}, } _attribute_map = { 'service_specification':", "{ 'type': {'required': True}, 'status': {'readonly': True}, } _attribute_map =", "kwargs.get('retention_period', None) class VideoContentToken(msrest.serialization.Model): \"\"\"\"Video content token grants access to", "parameterized. This allows individual pipelines refer to different values, such", "sub-classes are: VideoEncoderH264. All required parameters must be populated in", "double of the chosen video segment length. 
It is available", "None self.token = None class EncoderPresetBase(msrest.serialization.Model): \"\"\"Base type for all", "{'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} } def __init__( self, **kwargs ): super(AudioEncoderBase, self).__init__(**kwargs)", "ignored when sending a request. :ivar code: The error code.", "absent (null), all video content is retained indefinitely. This property", "True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'type_properties_type': {'readonly': True},", "= None self.token = None class VideoContentUrls(msrest.serialization.Model): \"\"\"Set of URLs", "reason why the given name is not available. Possible values", "for a topology to be parameterized. This allows individual pipelines", "is being referenced, doesn't necessarily indicate that data is being", "self, **kwargs ): super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link", "of log specifications. :vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List of", "bitrate, in kilobits per second or Kbps, at which video", "directly. Known sub-classes are: VideoSequenceAbsoluteTimeMarkers. All required parameters must be", "is available from the source. :type rtsp_tunnel_url: str :param preview_image_urls:", "{'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'edge_module_id': {'readonly':", "pipeline job operation. :vartype error: ~video_analyzer.models.ErrorDetail \"\"\" _validation = {", "**kwargs ): super(VideoEncoderH264, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str", "self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = kwargs['name']", "'kind': {'key': 'kind', 'type': 'str'}, 'sku': {'key': 'sku', 'type': 'Sku'},", "~video_analyzer.models.CreatedByType :param created_at: The timestamp of resource creation (UTC). :type", "be applied. 
Default mode is 'Pad'. If the mode is", "and audio content. :param segment_length: Video segment length indicates the", "{'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'preset':", "'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'}, } def __init__( self, **kwargs", "source. :type rtsp_tunnel_url: str :param preview_image_urls: Video preview image URLs.", "{'key': 'disableRtspPublishing', 'type': 'str'}, } def __init__( self, **kwargs ):", "or uploaded into the video. Newly created videos have this", "self, **kwargs ): super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id = None class EdgeModuleEntityCollection(msrest.serialization.Model):", "LivePipeline(ProxyResource): \"\"\"Live pipeline represents a unique instance of a live", "{'readonly': True}, 'state': {'readonly': True}, 'expiration': {'readonly': True}, 'error': {'readonly':", "be played on any standard media player. It is available", "edge module in case the module state lost or reset.", "str \"\"\" _validation = { 'type': {'required': True}, 'iot_hub_name': {'required':", ":param title: Optional title provided by the user. Value can", "**kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name = None", "video analyzer operation. All required parameters must be populated in", "one range specified in the sequence. All required parameters must", "description of the pipeline topology. It is recommended that the", "self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates = kwargs['certificates'] class", "type: str self.alg = kwargs['alg'] self.n = kwargs['n'] self.e =", "Known sub-classes are: VideoSink. All required parameters must be populated", "individual pipelines refer to different values, such as individual cameras'", "source only picks up recorded media within these ranges. 
:type", "str :param tunnel: Describes the tunnel through which Video Analyzer", "see if the camera bitrate is now below the reserved", "'description': {'key': 'description', 'type': 'str'}, 'actions_required': {'key': 'actionsRequired', 'type': 'str'},", "the parameter. Possible values include: \"String\", \"SecretString\", \"Int\", \"Double\", \"Bool\".", "range is from 500 to 3000 Kbps in increments of", "def __init__( self, **kwargs ): super(EncoderCustomPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset'", "str self.ranges = kwargs['ranges'] class VideoSink(SinkNodeBase): \"\"\"Video sink in a", "str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState \"\"\" _validation = { 'id': {'readonly': True},", "provider require any updates on the consumer. :type actions_required: str", "class VideoContentUrls(msrest.serialization.Model): \"\"\"Set of URLs to the video content. :param", "'str'}, } def __init__( self, **kwargs ): super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value", "__init__( self, **kwargs ): super(VideoMediaInfo, self).__init__(**kwargs) self.segment_length = kwargs.get('segment_length', None)", "} def __init__( self, **kwargs ): super(ParameterDeclaration, self).__init__(**kwargs) self.name =", "to \"live\". :type segment_length: str :param retention_period: Video retention period", "'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},", "not this class directly. Known sub-classes are: TlsEndpoint, UnsecuredEndpoint. All", "initial handshake, the IoT edge module will agree on a", "_validation = { 'name': {'required': True}, 'value': {'required': True}, }", "None) self.parameters = kwargs.get('parameters', None) self.sources = kwargs.get('sources', None) self.processors", "transit). 
All required parameters must be populated in order to", "consumption: ~video_analyzer.models.GroupLevelAccessControl \"\"\" _attribute_map = { 'integration': {'key': 'integration', 'type':", "'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type':", "to the scenario to be achieved and can be reused", "str :param name: Required. Name of the built-in encoding preset.", "source allows for content from a Video Analyzer video resource", "to send to Azure. :param user_assigned_identity: Required. The user assigned", "effect if the video already exists. :param title: Optional title", "class Properties(msrest.serialization.Model): \"\"\"Metric properties. Variables are only populated by the", "**kwargs ): super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link =", ":vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] \"\"\" _validation = { 'endpoints': {'readonly': True},", "The operation name. :type name: str :param display: The operation", "'type': '[SinkNodeBase]'}, } def __init__( self, **kwargs ): super(PipelineTopologyUpdate, self).__init__(**kwargs)", "= kwargs['type'] class EndpointBase(msrest.serialization.Model): \"\"\"Base class for endpoints. You probably", "a request. :ivar code: The error code. :vartype code: str", "length. It is available when the video type is 'archive'", "string pattern. Parameters can have optional default values and can", "account. :type bitrate_kbps: int :ivar state: Current state of the", "codec. All required parameters must be populated in order to", "{'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, } _attribute_map", "additional_info: The error additional info. :vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] \"\"\" _validation", "token for the Azure Video Analyzer IoT edge module. 
All", "OperationDisplay(msrest.serialization.Model): \"\"\"Operation details. :param provider: The service provider. :type provider:", "type for all video encoding presets, which define the recipe", "video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param video_publishing_options: Options to change how the video", "scale: ~video_analyzer.models.VideoScale \"\"\" _validation = { 'type': {'required': True}, }", "'str'}, 'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'}, 'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type':", "bool :param has_data: Required. Value indicating whether or not there", "kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class EdgeModuleProvisioningToken(msrest.serialization.Model): \"\"\"Provisioning token", "server to be ingested into a pipeline. All required parameters", "the source. :type video_name: str :param time_sequences: Required. Describes a", "'true' causes the certificate chain trust validation to be skipped.", "'str'}, 'e': {'key': 'e', 'type': 'str'}, } def __init__( self,", "{'readonly': True}, 'supported_time_grain_types': {'readonly': True}, } _attribute_map = { 'name':", ":vartype principal_id: str \"\"\" _validation = { 'client_id': {'readonly': True},", "} def __init__( self, **kwargs ): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status =", ":type type: str :param name: Required. Name of the built-in", "Video low-latency streaming URL. The live content can be automatically", "storage transactions. Larger segments reduce the amount of storage transactions", ":type processors: list[~video_analyzer.models.ProcessorNodeBase] :param sinks: List of the topology sink", "~datetime.datetime :ivar error: Details about the error, in case the", "and blobs. The primary storage account must be a Standard", "Type of the parameter. 
Possible values include: \"String\", \"SecretString\", \"Int\",", "**kwargs ): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status = kwargs.get('status', None) self.description =", "through a pipeline job. Videos ingested through live pipelines can", "the user. Value can be up to 256 characters long.", "from azure.core.exceptions import HttpResponseError import msrest.serialization class Resource(msrest.serialization.Model): \"\"\"Common fields", "video. :type frame_rate: str :param scale: Describes the resolution of", "Video archive streaming base URL. The archived content can be", "be encoded (2-channel stereo audio at a sampling rate of", "the HTTP connections alongside the RTSP messages. Possible values include:", "\"Key\". :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType :param last_modified_at: The timestamp", "being received. For example, video recording may be gated on", "endpoint. Possible values include: \"ClientApi\". :type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType", "'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleProvisioningToken, self).__init__(**kwargs)", "{'key': 'displayName', 'type': 'str'}, 'display_description': {'key': 'displayDescription', 'type': 'str'}, 'unit':", ":param last_modified_at: The timestamp of resource last modification (UTC). :type", "resource: Resource on which the operation is performed. :type resource:", "page of the collection (when the collection contains too many", "<filename>sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation.", ":ivar log_specifications: List of log specifications. 
:vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar", "True}, 'sku': {'required': True}, } _attribute_map = { 'id': {'key':", "**kwargs ): super(EndpointBase, self).__init__(**kwargs) self.type = None # type: Optional[str]", "{ 'id': {'key': 'id', 'type': 'str'}, 'identity': {'key': 'identity', 'type':", "self.user_assigned_identity = kwargs['user_assigned_identity'] class RsaTokenKey(TokenKey): \"\"\"Required validation properties for tokens", "type of identity that created the resource. Possible values include:", "str or ~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed reason why the given", "RS256, RS384 or RS512. Possible values include: \"RS256\", \"RS384\", \"RS512\".", "self.system_data = None class ProxyResource(Resource): \"\"\"The resource model definition for", "'display', 'type': 'OperationDisplay'}, 'origin': {'key': 'origin', 'type': 'str'}, 'properties': {'key':", "= None # type: Optional[str] self.credentials = kwargs['credentials'] self.url =", "'properties.bitrateKbps', 'type': 'int'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'parameters': {'key':", "to be applied on this specific pipeline. :type value: str", "provided by the user. Value can be up to 256", "disable_archive: When set to 'true' content will not be archived", "of EdgeModuleEntity items. :type value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A link", "self.ranges = kwargs['ranges'] class VideoSink(SinkNodeBase): \"\"\"Video sink in a live", "across many different cameras, as long as the same processing", "'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def __init__( self, **kwargs", "str \"\"\" _validation = { 'expiration_date': {'readonly': True}, 'token': {'readonly':", "{'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access':", "the topology. :type name: str :param inputs: Required. 
An array", "which defines the recipe or instructions on how the input", "= kwargs.get('next_link', None) class LivePipelineOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status", "resource last modification (UTC). :type last_modified_at: ~datetime.datetime \"\"\" _attribute_map =", "files. Variables are only populated by the server, and will", "content authorization token on any compatible DASH or HLS players", "valid if it matches at least one of the given", "of PipelineJob items. :type value: list[~video_analyzer.models.PipelineJob] :param next_link: A link", ":type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the", "additional info. Variables are only populated by the server, and", "__init__( self, **kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' #", "which define the recipe or instructions on how the input", "per second or Kbps, at which audio should be encoded", "'type': '[str]'}, 'audiences': {'key': 'audiences', 'type': '[str]'}, 'claims': {'key': 'claims',", "__init__( self, **kwargs ): super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None)", "and not this class directly. Known sub-classes are: VideoSequenceAbsoluteTimeMarkers. All", "VideoContentUrls(msrest.serialization.Model): \"\"\"Set of URLs to the video content. :param download_url:", "'type': {'readonly': True}, 'system_data': {'readonly': True}, 'kind': {'required': True}, 'sku':", "identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of", "= None self.error = None class LivePipelineUpdate(ProxyResource): \"\"\"Live pipeline represents", "= '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str self.inputs = kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase):", "request. 
:ivar name: The name of the live pipeline operation.", "None) class PipelineJobCollection(msrest.serialization.Model): \"\"\"A collection of PipelineJob items. :param value:", ":vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] \"\"\" _validation = { 'log_specifications': {'readonly': True},", "self, **kwargs ): super(EncoderSystemPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type:", "must contains all claims and respective values for it to", "\"Processing\", \"Canceled\", \"Completed\", \"Failed\". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar", "{ 'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'}, } def __init__( self,", "this value set to false. :type has_data: bool :param is_in_use:", "type: str class VideoEntity(ProxyResource): \"\"\"Represents a video resource within Azure", "connection (with exponential backoff), checking to see if the camera", "'title': {'key': 'properties.title', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'},", ":param topology_name: The reference to an existing pipeline topology defined", "{ 'client_id': {'key': 'clientId', 'type': 'str'}, 'principal_id': {'key': 'principalId', 'type':", "~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A collection of information about the state", "display_description: str :ivar unit: The metric unit. Possible values include:", "topology to be used as inputs for this node. :type", ":type actions_required: str \"\"\" _attribute_map = { 'status': {'key': 'status',", "self.name = None self.display_name = None self.blob_duration = None class", "video resource. 
This property is only allowed for topologies where", "video type is 'archive' and a live, low-latency feed is", ":ivar provisioning_state: The provisioning state of the private endpoint connection", "'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'preset': {'key': 'preset', 'type':", "_attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'certificates': {'key':", "'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'private_endpoint': {'key': 'properties.privateEndpoint',", "ErrorAdditionalInfo(msrest.serialization.Model): \"\"\"The resource management error additional info. Variables are only", "'str'}, } def __init__( self, **kwargs ): super(RsaTokenKey, self).__init__(**kwargs) self.type", "allows for content to be ingested from cameras. * Processors:", "Name of a new or existing video resource used to", "type: str self.ranges = kwargs['ranges'] class VideoSink(SinkNodeBase): \"\"\"Video sink in", "status: str \"\"\" _validation = { 'id': {'required': True}, 'status':", "'type': 'AudioEncoderBase'}, 'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'}, } def __init__(", "None) class PipelineTopologyCollection(msrest.serialization.Model): \"\"\"A collection of PipelineTopology items. :param value:", "for approval/rejection of the connection. :type description: str :param actions_required:", "Video Analyzer IoT edge module. All required parameters must be", "_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'status': {'key':", "None) class VideoPublishingOptions(msrest.serialization.Model): \"\"\"Optional flags used to change how video", ":param message: The error message. :type message: str \"\"\" _attribute_map", "referenced throughout the topology nodes through the use of \"${PARAMETER_NAME}\"", "type for all encoder presets, which define the recipe or", "Known sub-classes are: SecureIotDeviceRemoteTunnel. All required parameters must be populated", "code: str :ivar message: The error message. 
:vartype message: str", "Required. Name of the claim which must be present on", "server. :type type: str :param issuers: List of expected token", "type: str self.username = kwargs['username'] self.password = kwargs['password'] class VideoAnalyzer(TrackedResource):", "value to be added to the video content URL as", "(i.e. \"P1D\" equals 1 day) and can vary between 1", "are: TlsEndpoint, UnsecuredEndpoint. All required parameters must be populated in", "super(EncoderCustomPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str self.audio_encoder =", "cause incorrect behavior and will be lost if the code", "can be only one range specified in the sequence. All", "optionally archived, and published via a video resource. If archiving", "expiration_date: ~datetime.datetime :ivar token: The content token value to be", "is set to \"live\". :type segment_length: str :param retention_period: Video", "can be automatically played by the Azure Video Analyzer player", "'id': {'required': True}, 'identity': {'required': True}, 'status': {'readonly': True}, }", "take effect if the video already exists. :param title: Optional", ":param segment_length: Segment length indicates the length of individual content", "a unique instance of a live topology, used for real-time", "video recording can be played in \"live mode\" with latencies", "for all video encoding presets, which define the recipe or", "behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase \"\"\" _validation = {", "**kwargs ): super(TokenClaim, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs['value']", "between service consumer and provider. 
:type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state:", "{'readonly': True}, 'dimensions': {'readonly': True}, 'enable_regional_mdm_account': {'readonly': True}, 'source_mdm_account': {'readonly':", ":type authentication: ~video_analyzer.models.AuthenticationBase \"\"\" _validation = { 'id': {'readonly': True},", "= kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class EdgeModuleProvisioningToken(msrest.serialization.Model): \"\"\"Provisioning", "events or camera may not be accessible at the time.", "be described here. :type description: str :param parameters: List of", "): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id', None)", "model definition for a Azure Resource Manager proxy resource. It", "'type': 'str'}, 'tier': {'key': 'tier', 'type': 'str'}, } def __init__(", ":type provider: str :param resource: Resource on which the operation", ":ivar token: The token blob to be provided to the", ":param alg: Required. Elliptical curve algorithm to be used: ES256,", "a video resource of type 'file'. All required parameters must", "in the HTTP connections alongside the RTSP messages. Possible values", "self, **kwargs ): super(SourceNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type:", "management error additional info. Variables are only populated by the", "of the resource. :vartype name: str :ivar type: The type", "be skipped. Default is 'false'. :type ignore_signature: str \"\"\" _attribute_map", "'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} } def __init__( self, **kwargs ): super(AuthenticationBase,", "'type': {'required': True}, 'kid': {'required': True}, } _attribute_map = {", "~video_analyzer.models.SystemData \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':", "set to 'false'. 
:type disable_rtsp_publishing: str \"\"\" _attribute_map = {", "= { 'type': {'key': 'type', 'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties',", "by the user. Value can be up to 2048 characters", "'nodeName', 'type': 'str'}, } def __init__( self, **kwargs ): super(NodeInput,", "\"\"\"Metric properties. Variables are only populated by the server, and", "\"\"\" _attribute_map = { 'small': {'key': 'small', 'type': 'str'}, 'medium':", "supported time grain types. :vartype supported_time_grain_types: list[str] \"\"\" _validation =", "} def __init__( self, **kwargs ): super(JwtAuthentication, self).__init__(**kwargs) self.type =", "by server. :type type: str \"\"\" _validation = { 'type':", "available from the source. :type rtsp_tunnel_url: str :param preview_image_urls: Video", "are available when the video type is 'archive' and preview", "'e': {'required': True}, } _attribute_map = { 'type': {'key': '@type',", "the Key Vault key used to encrypt the account. The", "to be described here. :type description: str :param parameters: List", "class directly. Known sub-classes are: VideoSequenceAbsoluteTimeMarkers. All required parameters must", "define the authentication rules, and control access to specific video", "creation (UTC). :type created_at: ~datetime.datetime :param last_modified_by: The identity that", "'str'}, 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, 'lock_aggregation_type': {'key': 'lockAggregationType', 'type':", "or can be created by exporting sequences from existing captured", "HttpResponseError import msrest.serialization class Resource(msrest.serialization.Model): \"\"\"Common fields that are returned", "capacity, then the service will disconnect temporarily from the camera.", "self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.bitrate_kbps =", "Analyzer resource. All required parameters must be populated in order", "ignored when sending a request. 
:ivar name: The diagnostic log", "how video is published. These are only allowed for topologies", "latency but generate larger volume of storage transactions. Larger segments", "a resource. All required parameters must be populated in order", "'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'video_name': {'key': 'videoName',", "super(Properties, self).__init__(**kwargs) self.service_specification = None class ResourceIdentity(msrest.serialization.Model): \"\"\"The user assigned", "{'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions':", "class for topology sink nodes. You probably want to use", "public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for", "): super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link',", "long as the same processing is to be applied across", "self.download_url = kwargs.get('download_url', None) self.archive_base_url = kwargs.get('archive_base_url', None) self.rtsp_tunnel_url =", "~datetime.datetime :param last_modified_by: The identity that last modified the resource.", "are approximately double of the chosen video segment length. It", "'[TokenClaim]'}, 'keys': {'key': 'keys', 'type': '[TokenKey]'}, } def __init__( self,", "self.value = kwargs.get('value', None) class PemCertificateList(CertificateSource): \"\"\"A list of PEM", "job. :param code: The error code. :type code: str :param", "RTSP servers. :type endpoint: ~video_analyzer.models.EndpointBase \"\"\" _validation = { 'type':", "**kwargs ): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str", "and published via a video resource. 
If archiving is enabled,", "'ErrorDetail'}, } def __init__( self, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error", "be processed. You probably want to use the sub-classes and", "__init__( self, **kwargs ): super(ErrorDetail, self).__init__(**kwargs) self.code = None self.message", "= None self.source_mdm_account = None self.source_mdm_namespace = None self.supported_time_grain_types =", "PipelineJobOperationStatus(msrest.serialization.Model): \"\"\"Used for tracking the status of an operation on", "integration group. :type integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public network access", "'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineJobError,", "point. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A collection of information", "kwargs.get('mode', None) class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): \"\"\"A sequence of absolute datetime ranges", "'error': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',", ":type created_by: str :param created_by_type: The type of identity that", "and will be ignored when sending a request. :ivar id:", "be ignored when sending a request. :ivar log_specifications: List of", "'true' causes the certificate subject name validation to be skipped.", "'videoEncoder', 'type': 'VideoEncoderBase'}, } def __init__( self, **kwargs ): super(EncoderCustomPreset,", "identifier for Private Endpoint. :vartype id: str \"\"\" _validation =", "PipelineJobError(msrest.serialization.Model): \"\"\"Details about the error for a failed pipeline job.", "unique RTSP camera. Variables are only populated by the server,", "} def __init__( self, **kwargs ): super(PemCertificateList, self).__init__(**kwargs) self.type =", "prevent this value to be returned as part of the", "class directly. Known sub-classes are: TlsEndpoint, UnsecuredEndpoint. 
All required parameters", "how audio should be processed. You probably want to use", "topologies where \"kind\" is set to \"live\". :type retention_period: str", "{'required': True}, 'credentials': {'required': True}, 'url': {'required': True}, } _attribute_map", "Azure. :param name: Required. The operation name. :type name: str", "'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},", "'scale', 'type': 'VideoScale'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'}", "{ 'can_stream': {'required': True}, 'has_data': {'required': True}, 'is_in_use': {'required': True},", "'VideoSequenceAbsoluteTimeMarkers'} } def __init__( self, **kwargs ): super(TimeSequenceBase, self).__init__(**kwargs) self.type", "__init__( self, **kwargs ): super(StorageAccount, self).__init__(**kwargs) self.id = kwargs['id'] self.identity", "When activated, this live pipeline will process content according to", "'value', 'type': '[Operation]'}, } def __init__( self, **kwargs ): super(OperationCollection,", "= kwargs.get('description', None) self.default = kwargs.get('default', None) class ParameterDefinition(msrest.serialization.Model): \"\"\"Defines", "{ 'key_identifier': {'key': 'keyIdentifier', 'type': 'str'}, 'current_key_identifier': {'key': 'currentKeyIdentifier', 'type':", "be automatically deleted from your account. :vartype expiration: ~datetime.datetime :ivar", "**kwargs ): super(LivePipeline, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description =", "indicates the length of individual content files (segments) which are", ":param value: A collection of AccessPolicyEntity items. :type value: list[~video_analyzer.models.AccessPolicyEntity]", "validation of TLS endpoints. 
:param ignore_hostname: When set to 'true'", "can be ingested from RTSP cameras through live pipelines or", "str :param bitrate_kbps: The maximum bitrate, in kilobits per second", "modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param title: Optional video title", "NodeBase(msrest.serialization.Model): \"\"\"Base class for nodes. You probably want to use", "= None class LivePipelineUpdate(ProxyResource): \"\"\"Live pipeline represents a unique instance", "of storage transactions. Larger segments reduce the amount of storage", "of the input video. :type scale: ~video_analyzer.models.VideoScale \"\"\" _validation =", "to Azure. :param name: Required. Name of the parameter declared", "Corporation. All rights reserved. # Licensed under the MIT License.", "self.code = kwargs.get('code', None) self.message = kwargs.get('message', None) class PipelineJobOperationStatus(msrest.serialization.Model):", "self.display_name = None self.blob_duration = None class MetricDimension(msrest.serialization.Model): \"\"\"A metric", "code-block:: - HLSv4: /manifest(format=m3u8-aapl).m3u8 - HLS CMAF: /manifest(format=m3u8-cmaf) - DASH", "= None self.enable_regional_mdm_account = None self.source_mdm_account = None self.source_mdm_namespace =", "is 'file' and video file is available for consumption. :type", "firewall. :type tunnel: ~video_analyzer.models.TunnelBase \"\"\" _validation = { 'type': {'required':", "or 'Stretch' then both width and height must be specified.", "kwargs.get('trusted_certificates', None) self.validation_options = kwargs.get('validation_options', None) class TlsValidationOptions(msrest.serialization.Model): \"\"\"Options for", "project root for license information. # Code generated by Microsoft", "Analyzer will use to access the storage account. 
:type identity:", "'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} } def __init__( self, **kwargs ): super(EndpointBase,", "additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] \"\"\" _validation = { 'code': {'readonly': True}, 'message':", "for the pipeline. :type description: str :ivar state: Current state", "When using TCP, the RTP packets are interleaved on the", "for controlling the validation of TLS endpoints. :param ignore_hostname: When", "Segment length indicates the length of individual content files (segments)", "'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, 'identity': {'key': 'identity', 'type':", "True}, 'token': {'readonly': True}, } _attribute_map = { 'expiration_date': {'key':", "'retentionPeriod', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoCreationProperties,", "UsernamePasswordCredentials(CredentialsBase): \"\"\"Username and password credentials. All required parameters must be", "as part of the credentials. :type username: str :param password:", "kwargs.get('network_access_control', None) self.provisioning_state = None self.private_endpoint_connections = None class VideoAnalyzerCollection(msrest.serialization.Model):", "short lived and it is only used for the initial", "retention_period: Video retention period indicates how long the video is", "processing content for a particular outcome. The topology should be", "'type': 'bool'}, 'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'}, 'source_mdm_namespace': {'key': 'sourceMdmNamespace',", "quality of the input video. :type bitrate_kbps: str :param frame_rate:", "value: Required. 
Expected value of the claim to be present", "'tier', 'type': 'str'}, } def __init__( self, **kwargs ): super(Sku,", "must be provided in the ISO8601 duration format in the", "super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier = kwargs['key_identifier'] self.current_key_identifier = None class ListProvisioningTokenInput(msrest.serialization.Model):", ":ivar to_be_exported_for_shoebox: Whether to export metric to shoebox. :vartype to_be_exported_for_shoebox:", "fields that are returned in the response for all Azure", "self.endpoint = kwargs['endpoint'] class TunnelBase(msrest.serialization.Model): \"\"\"Base class for tunnel objects.", "= { 'type': {'required': True}, 'username': {'required': True}, 'password': {'required':", "should be defined according to the scenario to be achieved", "kind. Possible values include: \"Live\", \"Batch\". :type kind: str or", "): super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date = kwargs['expiration_date'] class LivePipeline(ProxyResource): \"\"\"Live pipeline", "type. :type type: str \"\"\" _attribute_map = { 'name': {'key':", "and will be ignored when sending a request. :ivar client_id:", "'type': 'str'}, } def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs)", "topology processor nodes. Processor nodes enable pipeline data to be", "ProcessorNodeBase(NodeBase): \"\"\"Base class for topology processor nodes. You probably want", "error additional info. Variables are only populated by the server,", "description. :vartype display_description: str :ivar unit: The metric unit. Possible", "identity: The identities associated to the Video Analyzer resource. :type", "feed is available from the source. :type rtsp_tunnel_url: str :param", "by server. :type type: str :param bitrate_kbps: The maximum bitrate,", "Connection resource. 
Variables are only populated by the server, and", "camera exceeds this capacity, then the service will disconnect temporarily", "be populated in order to send to Azure. :param endpoint_url:", "\"Live\", \"Batch\". :type kind: str or ~video_analyzer.models.Kind :param sku: Describes", "\"Pending\", \"Approved\", \"Rejected\". :type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description:", "kwargs.get('sinks', None) class PipelineTopologyCollection(msrest.serialization.Model): \"\"\"A collection of PipelineTopology items. :param", "'id', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'str'}, 'end_time': {'key':", "'to_be_exported_for_shoebox': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name',", "values include: \"Reader\". :type role: str or ~video_analyzer.models.AccessPolicyRole :param authentication:", "a request. :ivar expiration_date: The expiration date of the registration", "height: The desired output video height. :type height: str :param", "__init__( self, **kwargs ): super(VideoArchival, self).__init__(**kwargs) self.retention_period = kwargs.get('retention_period', None)", "Having multiple keys allow for seamless key rotation of the", "values include: \"RS256\", \"RS384\", \"RS512\". :type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo", "List of keys which can be used to validate access", "def __init__( self, **kwargs ): super(AudioEncoderAac, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac'", "about the state of the connection between service consumer and", "= kwargs['username'] self.password = kwargs['password'] class VideoAnalyzer(TrackedResource): \"\"\"The Video Analyzer", "str :param display: The operation display name. 
:type display: ~video_analyzer.models.OperationDisplay", "set to P30D (30 days), content older than 30 days", "'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'}, } def __init__( self, **kwargs", "status: Indicates whether the connection has been Approved/Rejected/Removed by the", "dict[str, str] :param identity: The identities associated to the Video", "'sourceMdmNamespace', 'type': 'str'}, 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'}, } def", "be added to the video content URL as the value", "{'readonly': True}, } _attribute_map = { 'key_identifier': {'key': 'keyIdentifier', 'type':", "List of log specifications. :vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List", "= { 'name': {'required': True}, 'type': {'required': True}, } _attribute_map", "retained indefinitely. This property is only allowed for topologies where", "encrypted in transit). All required parameters must be populated in", "= kwargs.get('name_available', None) self.reason = kwargs.get('reason', None) self.message = kwargs.get('message',", "streaming. This is used, for example, when the topology is", "str or ~video_analyzer.models.RtspTransport :param endpoint: Required. RTSP endpoint information for", "certificate authorities when authenticating a TLS connection. 
A null list", "self, **kwargs ): super(NodeBase, self).__init__(**kwargs) self.type = None # type:", "= kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model): \"\"\"A collection of Operation items.", "{ 'name': {'required': True}, 'tier': {'readonly': True}, } _attribute_map =", "define the recipe or instructions on how the input video", "{'key': 'x', 'type': 'str'}, 'y': {'key': 'y', 'type': 'str'}, }", "self.provisioning_state = None self.private_endpoint_connections = None class VideoArchival(msrest.serialization.Model): \"\"\"Video archival", "{'readonly': True}, 'metric_specifications': {'readonly': True}, } _attribute_map = { 'log_specifications':", "'#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} } def __init__( self, **kwargs ):", "super(MetricDimension, self).__init__(**kwargs) self.name = None self.display_name = None self.to_be_exported_for_shoebox =", "True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'kind': {'required': True},", "\"\"\"A metric emitted by service. Variables are only populated by", "can vary between 1 day to 10 years, in 1", "to the endpoint URL. This is an optional property, typically", "tracked top level resource which has 'tags' and a 'location'.", "for integration group. :type integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public network", "pipeline can only define or override parameters values for parameters", "self, **kwargs ): super(VideoAnalyzer, self).__init__(**kwargs) self.identity = kwargs.get('identity', None) self.storage_accounts", "preview images are enabled. 
:param small: Low resolution preview image", "\"\"\"Base type for all audio encoder presets, which define the", "'frameRate', 'type': 'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, } def", "'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type':", "'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, }", "of private link resources. :param value: Array of private link", ":param name_available: Indicates if the resource name is available. :type", "hours or less. Currently, there can be only one range", ":vartype flags: ~video_analyzer.models.VideoFlags :ivar content_urls: Set of URLs to the", "resource needs to be created on the service. :type video_creation_properties:", "token audiences. Token audience is valid if it matches at", "not the video can be streamed. Only \"archive\" type videos", "self.name = kwargs['name'] class Endpoint(msrest.serialization.Model): \"\"\"The endpoint details. All required", "and audio content. :type media_info: ~video_analyzer.models.VideoMediaInfo :param archival: Video archival", "name: Required. Name of the parameter. :type name: str :param", "the module state lost or reset. Variables are only populated", "name: str :ivar status: The status of the pipeline job", "content for a particular outcome. The topology should be defined", "License.txt in the project root for license information. # Code", "'type': 'iso-8601'}, 'token': {'key': 'token', 'type': 'str'}, } def __init__(", "video archive segments which are intended to be kept in", "a Video Analyzer account. Variables are only populated by the", "types. :type supported_aggregation_types: list[str] :ivar dimensions: The metric dimensions. :vartype", ":vartype group_id: str :ivar required_members: The private link resource required", "is used. :type bitrate_kbps: str \"\"\" _validation = { 'type':", ":param iot_hub_name: Required. Name of the IoT Hub. 
:type iot_hub_name:", "or not there has ever been data recorded or uploaded", "'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'role': {'key':", "Generator. # Changes may cause incorrect behavior and will be", "type: str :param kid: Required. JWT token key id. Validation", "and published via a video resource of type 'file'. All", "is allowed for resources under the Video Analyzer account. Possible", "'{UserAssignedManagedIdentity}'}, } def __init__( self, **kwargs ): super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type", "description for the pipeline. :type description: str :param bitrate_kbps: Maximum", "The IoT Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param", "'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'transport': {'key': 'transport',", "will be lost if the code is regenerated. # --------------------------------------------------------------------------", "if the mode is 'PreserveAspectRatio' then only one of width", "sources: list[~video_analyzer.models.SourceNodeBase] :param processors: List of the topology processor nodes.", "the video is kept in storage. Value must be specified", "'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'str'}, 'end_time': {'key': 'endTime',", "): super(UserAssignedManagedIdentity, self).__init__(**kwargs) self.client_id = None self.principal_id = None class", "of the pipeline (read-only). Possible values include: \"Processing\", \"Canceled\", \"Completed\",", ":ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled. :vartype", "of one or more data sinks which allow for data", "used when the endpoint is behind a firewall. 
:type tunnel:", "{'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'}, 'state':", "class PrivateLinkServiceConnectionState(msrest.serialization.Model): \"\"\"A collection of information about the state of", "'VideoScale'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'} } def", "video of type 'archive'. If used in a batch topology,", "} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'tier':", "enabled, this results in a video of type 'archive'. If", "'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'},", "'value': {'key': 'value', 'type': '[AccessPolicyEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'},", "): super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint = kwargs.get('private_endpoint', None) self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state',", "one or more data sinks which allow for data to", "{ 'type': {'key': 'type', 'type': 'str'}, 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type':", "supported_time_grain_types: list[str] \"\"\" _validation = { 'name': {'readonly': True}, 'display_name':", "video recording may be gated on events or camera may", "{'readonly': True}, 'system_data': {'readonly': True}, 'kind': {'required': True}, 'sku': {'required':", "the sum of the ranges should add up to 24", "for the Video Analyzer resource. All required parameters must be", "reserved. # Licensed under the MIT License. See License.txt in", "preview images are enabled. :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls \"\"\" _attribute_map =", "for encoding audio. 
:type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes a", "are only populated by the server, and will be ignored", "preview image URL. :type medium: str :param large: High resolution", "created on the service. :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param video_publishing_options: Options", "return in one response). :type next_link: str \"\"\" _attribute_map =", "rtsp_tunnel_url: Video low-latency streaming URL. The live content can be", "provider. :type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The provisioning state of", "~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for Video Analyzer. :type", "'description': {'key': 'description', 'type': 'str'}, 'default': {'key': 'default', 'type': 'str'},", "name: Required. Name of the claim which must be present", "'type': 'str'}, } def __init__( self, **kwargs ): super(Endpoint, self).__init__(**kwargs)", "of VideoEntity items. :param value: A collection of VideoEntity items.", "{'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'location': {'required':", "for Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state", "= { 'type': {'key': 'type', 'type': 'str'}, 'user_assigned_identities': {'key': 'userAssignedIdentities',", "camera may not be accessible at the time. :type is_in_use:", "{'key': 'downloadUrl', 'type': 'str'}, 'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'}, 'rtsp_tunnel_url':", "the input content using the encoder processor. All required parameters", ":param kind: Required. Topology kind. Possible values include: \"Live\", \"Batch\".", "RTSP server to be ingested into a pipeline. All required", "equal to 300. 
If omitted, the encoder uses the average", "recorded or uploaded into the video. Newly created videos have", "'type': '[SourceNodeBase]'}, 'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks',", "is available for consumption. :type download_url: str :param archive_base_url: Video", "str :ivar type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\"", "'type': 'str'}, } def __init__( self, **kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs)", "then only one of width or height need be provided.", "process content according to the pipeline topology definition. :type topology_name:", "'[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(VideoAnalyzerUpdate, self).__init__(**kwargs) self.tags", "None self.enable_regional_mdm_account = None self.source_mdm_account = None self.source_mdm_namespace = None", "URL will not be published, disabling low latency streaming. This", "collection of EdgeModuleEntity items. :param value: A collection of EdgeModuleEntity", ":ivar unit: The metric unit. Possible values include: \"Bytes\", \"Count\",", "class MetricDimension(msrest.serialization.Model): \"\"\"A metric dimension. Variables are only populated by", "str :ivar display_description: The metric display description. :vartype display_description: str", "preset: ~video_analyzer.models.EncoderPresetBase \"\"\" _validation = { 'type': {'required': True}, 'name':", "Azure. :param type: Required. The type of key used to", "Doing so will ensure that one 'noisy neighbor' does not", "self.inputs = kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase): \"\"\"Encoder processor allows for encoding", "the scenario to be achieved and can be reused across", ":ivar display_name: The metric display name. 
:vartype display_name: str :ivar", "= kwargs.get('last_modified_by', None) self.last_modified_by_type = kwargs.get('last_modified_by_type', None) self.last_modified_at = kwargs.get('last_modified_at',", "This URL can be used in conjunction with the video", "{'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, 'origin':", "self.sinks = kwargs.get('sinks', None) class PrivateEndpoint(msrest.serialization.Model): \"\"\"The Private Endpoint resource.", "header. :type kid: str :param alg: Required. Elliptical curve algorithm", "{'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'identity':", "part of the credentials. It is recommended that this value", "'type': 'SystemData'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs)", "of VideoEntity items. :type value: list[~video_analyzer.models.VideoEntity] :param next_link: A link", "tunnel: Describes the tunnel through which Video Analyzer can connect", "'issuers', 'type': '[str]'}, 'audiences': {'key': 'audiences', 'type': '[str]'}, 'claims': {'key':", "For example, if this is set to P30D (30 days),", "'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'}, 'state': {'key': 'properties.state', 'type': 'str'},", "self, **kwargs ): super(VideoMediaInfo, self).__init__(**kwargs) self.segment_length = kwargs.get('segment_length', None) class", "None class StorageAccount(msrest.serialization.Model): \"\"\"The details about the associated storage account.", "): super(ErrorDetail, self).__init__(**kwargs) self.code = None self.message = None self.target", "~video_analyzer.models.AuthenticationBase \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':", "as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param preset:", ":type value: list[~video_analyzer.models.VideoAnalyzer] \"\"\" _attribute_map = { 'value': {'key': 'value',", "content to be ingested from cameras. 
* Processors: list of", "**kwargs ): super(VideoPreviewImageUrls, self).__init__(**kwargs) self.small = kwargs.get('small', None) self.medium =", "self.trusted_certificates = kwargs.get('trusted_certificates', None) self.validation_options = kwargs.get('validation_options', None) class TlsValidationOptions(msrest.serialization.Model):", "= kwargs['expiration_date'] class LivePipeline(ProxyResource): \"\"\"Live pipeline represents a unique instance", "class ProcessorNodeBase(NodeBase): \"\"\"Base class for topology processor nodes. You probably", "grants access to the video content URLs.\". Variables are only", "metric display description. :vartype display_description: str :ivar unit: The metric", "self.location = kwargs['location'] class UnsecuredEndpoint(EndpointBase): \"\"\"Unsecured endpoint describes an endpoint", "'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def", "action type. Possible values include: \"Internal\". :type action_type: str or", "RtspSource(SourceNodeBase): \"\"\"RTSP source allows for media from an RTSP camera", "only one range specified in the sequence. All required parameters", "= kwargs.get('next_link', None) class VideoFlags(msrest.serialization.Model): \"\"\"Video flags contain information about", "{'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials':", "Current state of the pipeline (read-only). Possible values include: \"Inactive\",", "seconds increments. Changing this value after the initial call to", "None) self.next_link = kwargs.get('next_link', None) class AccountEncryption(msrest.serialization.Model): \"\"\"Defines how the", "str or ~video_analyzer.models.VideoScaleMode \"\"\" _attribute_map = { 'height': {'key': 'height',", "the TCP RTSP connection. When using HTTP, the RTSP messages", "send to Azure. :param name: Required. The SKU name. 
Possible", "= None self.metric_specifications = None class SinkNodeBase(NodeBase): \"\"\"Base class for", "When activated, this pipeline job will process content according to", "): super(VideoEncoderBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.bitrate_kbps", "a custom preset for encoding video. :type video_encoder: ~video_analyzer.models.VideoEncoderBase \"\"\"", "'properties.authentication', 'type': 'AuthenticationBase'}, } def __init__( self, **kwargs ): super(AccessPolicyEntity,", "backoff), checking to see if the camera bitrate is now", "for consumption group. :type consumption: ~video_analyzer.models.GroupLevelAccessControl \"\"\" _attribute_map = {", "name: Required. Operation identifier. :type name: str :param id: Operation", "unique within the topology. :type name: str :param transport: Network", "supported only with batch pipelines. All required parameters must be", "the account. The key may either be versioned (for example", "long lived HTTP connections, and the RTP packages are interleaved", "{'key': '@type', 'type': 'str'}, 'certificates': {'key': 'certificates', 'type': '[str]'}, }", "{'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, 'alg':", "can be updated at any time and the new desired", "to send to Azure. :param type: Required. The discriminator for", "scaling information. :param height: The desired output video height. :type", "content_urls: ~video_analyzer.models.VideoContentUrls :param media_info: Contains information about the video and", "name: str :param type: Required. Type of the parameter. Possible", "{'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} } def __init__( self,", "status: The current status of the storage account mapping. 
:vartype", "def __init__( self, **kwargs ): super(CredentialsBase, self).__init__(**kwargs) self.type = None", "**kwargs ): super(AudioEncoderAac, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str", "Value must be specified in ISO8601 duration format (i.e. \"P1D\"" ]
[ "None curshader.write('float {0} = {1};'.format(res_var, res)) # Normal map already", "if parse_surface: # Occlude out_occlusion = '0.0' elif node.type ==", "== 'LESS_THAN': out_val = 'float({0} < {1})'.format(val1, val2) elif op", "parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'tex_musgrave_f({0} * {1}", "elif blend == 'HUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2,", "== node.outputs[2]: # Tangent return 'wtangent' elif socket == node.outputs[3]:", "if normal_res != None: curshader.write('n = {0};'.format(normal_res)) def is_parsed(s): global", "if not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN matrix frag.write('vec3", "{1} * 0.5))'.format(co, scale) if sample_bump: write_bump(node, res) return res", "space return 'vVecCam' elif node.type == 'NEW_GEOMETRY': if socket ==", "is parse_value path preferred? nor = parse_vector_input(node.inputs[0]) return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value),", "# import math import bpy import os import arm.assets import", "'SATURATION': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert", "if image.packed_file is not None: filepath = './' + image.name", "Velocity particle_info['angular_velocity'] = True return 'vec3(0.0)' elif node.type == 'TANGENT':", "[6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01] ] if (t >= 12000): rgb[0]", "co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic({0} *", "ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2,", "== 'HAIR_INFO': return 'vec3(0.0)' # Tangent Normal elif node.type ==", "if image is None: return None # Get filepath filepath", "'1.0' elif node.type == 'AMBIENT_OCCLUSION': if parse_surface: # Single channel", "if socket == node.outputs[0]: # Is Camera Ray return '1.0'", "'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if 
node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co =", "0)'.format(fac_var, points[i].location[0]) # Write index index_var = name + '_i'", "parse_vector_input(node.inputs[0]) uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name) else: uv_name =", "con.add_elem('tex', 'short2norm') # UVMaps only for now mat = mat_get_material()", "== node.outputs[3]: # Is Glossy Ray return '1.0' elif socket", "col1, col2, scale) if sample_bump: write_bump(node, res) return res elif", "else [0.0, 0.0, 0.0] if scale[0] != 1.0 or scale[1]", "== 'TEX_POINTDENSITY': # Pass through return to_vec3([0.0, 0.0, 0.0]) elif", "'1.0': frag.write('n.xy *= {0};'.format(strength)) frag.write('n = normalize(TBN * n);') con.add_elem('tang',", "License for the specific language governing permissions and # limitations", "== 'BURN': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) #", "size request, cache size instead powimage = is_pow(image.size[0]) and is_pow(image.size[1])", "{tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;') else: if mat_texture_grad(): curshader.write('vec4", "[4.66849800e+03, 2.85655028e-05, 1.29075375e-01], [4.60124770e+03, 2.89727618e-05, 1.48001316e-01], [3.78765709e+03, 9.36026367e-06, 3.98995841e-01] ]", "* {3} + {1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var) out_metallic", "out_group = parse_input(inp) parents.pop() return out_group def parse_group_input(node, socket): index", "parse_volume_input(node.inputs[1]) # Displacement if _parse_displacement and disp_enabled() and node.inputs[2].is_linked: parsed", "grad == 'SPHERICAL': f = 'max(1.0 - sqrt({0}.x * {0}.x", "sample_bump global sample_bump_res # RGB if node.type == 'GROUP': return", "to_linear=False, tex_link=tex_link)) else: global parsed tex_store = store_var_name(node) # Pink", "= 'vec3(0.8)' out_roughness = '0.0' out_metallic = '0.0' out_occlusion =", "== 'BUMP': # Interpolation strength strength = parse_value_input(node.inputs[0]) # Height", "[] 
normal_parsed = False rpdat = arm.utils.get_rp() if rpdat.arm_rp_displacement ==", "elif socket == node.outputs[7]: # Ray Length return '0.0' elif", "get_rp_renderer(): return arm.utils.get_rp().rp_renderer def get_arm_export_tangents(): return bpy.data.worlds['Arm'].arm_export_tangents def safesrc(name): return", "parse_value_input(node.inputs[2]) # Roughness out_roughness = parse_value_input(node.inputs[3]) # Metallic out_metallic =", "elif socket == node.outputs[3]: # Object return 'mposition' elif socket", "curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition'", "vert global frag global geom global tesc global tese global", "= {} parents = [] normal_parsed = False curshader =", "Incoming return 'vVec' elif socket == node.outputs[5]: # Parametric return", "== 'DOT_PRODUCT': return 'dot({0}, {1})'.format(vec1, vec2) else: return '0.0' ##", "Depth if socket == node.outputs[1]: curshader.add_include('std/math.glsl') curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj') return", "= 'bposition' scale = parse_value_input(node.inputs[3]) res = 'tex_checker_f({0}, {1})'.format(co, scale)", "node.outputs[i] == socket: return i def node_name(s): for p in", "col2 = parse_vector_input(node.inputs[2]) scale = parse_value_input(node.inputs[3]) res = 'tex_checker({0}, {1},", "[3.37763626e+03, -4.34581697e-04, 1.64843306e+00], [4.10671449e+03, -8.61949938e-05, 6.41423749e-01], [4.66849800e+03, 2.85655028e-05, 1.29075375e-01], [4.60124770e+03,", "out_val = 'log({0})'.format(val1) elif op == 'SQRT': out_val = 'sqrt({0})'.format(val1)", "0.0, 0.0);') curshader.write(f'if (texCoordBlend.x > 0) {tex_store} += texture({tex_name}, {uv_name}.xy)", "parse_value_input(node.inputs[2]) return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v) elif node.type == 'COMBRGB': r", "curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex') return 'objectInfoMaterialIndex' 
elif socket == node.outputs[4]: #", "= parse_vector_input(node.inputs[0]) return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'VALTORGB':", "+ {1}) / 2.0)'.format(vec1, vec2) elif op == 'DOT_PRODUCT': return", "{1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var) out_roughness = '({0} *", "return '({0} + {1})'.format(vec1, vec2) elif op == 'SUBTRACT': return", "else: out_basecol = 'vec3(0.8)' out_roughness = '0.0' out_metallic = '0.0'", "0.0, 0.0] if scale[0] != 1.0 or scale[1] != 1.0", "{2})'.format(col1, col2, fac_var) elif blend == 'MULTIPLY': out_col = 'mix({0},", "node.outputs[6]: # Reflection return 'vec3(0.0)' elif node.type == 'UVMAP': #instance", "+ '_fac_inv' curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac)) curshader.write('{0}float {1}", "blend == 'DIFFERENCE': out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1,", "'RGBA': return parse_vector_input(inp) elif inp.type == 'VECTOR': return parse_vector_input(inp) elif", "# x * cos(theta) - y * sin(theta) # x", "'VALTORGB': # ColorRamp return '1.0' elif node.type == 'MATH': val1", "return 'p_index' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket", "OF ANY KIND, either express or implied. 
# See the", "'{0}.y'.format(vec) elif socket == node.outputs[2]: return '{0}.z'.format(vec) elif node.type ==", "== 'NORMAL': nor = parse_vector_input(node.inputs[0]) return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor) elif", "opac2) elif node.type == 'BSDF_PRINCIPLED': if parse_surface: write_normal(node.inputs[19]) out_basecol =", "'SUBSURFACE_SCATTERING': if parse_surface: write_normal(node.inputs[4]) out_basecol = parse_vector_input(node.inputs[0]) elif node.type ==", "parse_vector_input(node.inputs[1]) return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac) elif node.type", "return '0.0' elif socket == node.outputs[5]: # Is Reflection Ray", "node.type == 'BSDF_REFRACTION': # write_normal(node.inputs[3]) pass elif node.type == 'SUBSURFACE_SCATTERING':", "{1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var) out_specular = '({0} *", "shutil emission_found = False particle_info = None # Particle info", "parse_surface: # Base color out_basecol = parse_vector_input(node.inputs[0]) # Occlusion out_occlusion", "0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_4 = {1}{2} +", "def vector_curve(name, fac, points): # Write Ys array ys_var =", "== node.outputs[1]: curshader.add_include('std/math.glsl') curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj') return 'linearize(gl_FragCoord.z, cameraProj)' #", "parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_specular = '0.0' elif node.type ==", "return '{0}.g'.format(col) elif socket == node.outputs[2]: return '{0}.b'.format(col) elif node.type", "arm.utils.get_os() == 'win': s = filepath.rsplit('.', 1) arm.assets.add(arm.utils.asset_path(s[0] + '.'", "PBR'): if parse_surface: # Base color out_basecol = parse_vector_input(node.inputs[0]) #", "def parse_input(inp): if inp.type == 'SHADER': return parse_shader_input(inp) elif inp.type", "parse_value_input(node.inputs[1]) sample_bump = True height = 
parse_value_input(node.inputs[2]) sample_bump = False", "Ray return '0.0' elif socket == node.outputs[6]: # Is Transmission", "rgb[2] = ((b[0] * t + b[1]) * t +", "== 'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)): return '{0}.rgb'.format(store_var_name(node)) tex_name", "# distortion = parse_value_input(node.inputs[3]) res = 'tex_musgrave_f({0} * {1} *", "in camera space return 'vVecCam' elif node.type == 'NEW_GEOMETRY': if", "')') return None if do_convert: unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets',", "{1}, 0.0)'.format(out, math.cos(a), math.sin(a)) if location[0] != 0.0 or location[1]", "# Base color out_basecol = parse_vector_input(node.inputs[0]) # Occlusion out_occlusion =", "filepath.endswith(('.jpg', '.png', '.hdr')) if not has_ext: # Raw bytes, write", "out_val = 'abs({0})'.format(val1) elif op == 'MINIMUM': out_val = 'min({0},", "'dotNV' return 'fresnel({0}, {1})'.format(ior, dotnv) elif node.type == 'NEW_GEOMETRY': if", "== 'INTENSITY': res = 'tex_voronoi({0} * {1}).a'.format(co, scale) else: #", "0.0, 0.0, 0.0);') curshader.write(f'if (texCoordBlend.x > 0) {tex_store} += texture({tex_name},", "name + '_i' curshader.write('int {0} = {1};'.format(index_var, index)) # Linear", "mat_user.data.uv_layers # Second uvmap referenced if len(lays) > 1 and", "not use this file except in compliance with the License.", "curshader.add_function(c_functions.str_tex_noise) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition'", "out_opacity = '1.0' out_emission = '0.0' return out_basecol, out_roughness, out_metallic,", "curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name)) else:", "s = parse_value_input(node.inputs[1]) v = parse_value_input(node.inputs[2]) return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v)", "= parse_vector_input(node.inputs[0]) # Occlusion out_occlusion = parse_value_input(node.inputs[2]) # Roughness 
out_roughness", "= 1 else: i = 0 r = blackbody_table_r[i] g", "'COSINE': out_val = 'cos({0})'.format(val1) elif op == 'TANGENT': out_val =", "+ {1} * 0.5)'.format(emi1, emi2) if parse_opacity: out_opacity = '({0}", "cols array cols_var = node_name(node.name) + '_cols' curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems)))", "# Compute TBN matrix frag.write('vec3 texn = ({0}) * 2.0", "'tex_magic_f({0} * {1} * 4.0)'.format(co, scale) if sample_bump: write_bump(node, res,", "# Pass throuh return parse_vector_input(node.inputs[0]) elif node.type == 'COMBXYZ': x", "= parse_value_input(node.inputs[3]) res = 'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale)", "' + ({0} > {1} ? 1 : 0)'.format(fac_var, elems[i].position)", "def parse_value_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type ==", "== 'TEX_MUSGRAVE': # Fall back to noise curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked:", "curshader.write('float {0} = {1};'.format(res_var, res)) # Normal map already parsed,", "res) return res elif node.type == 'BRIGHTCONTRAST': out_col = parse_vector_input(node.inputs[0])", ">= 1902.0): i = 3 elif(t >= 1449.0): i =", "= socket_index(node, socket) parent = parents.pop() # Leaving group inp", "particle_info['velocity'] = True return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else", "col2, fac_var) # Revert to mix elif blend == 'DODGE':", "is_parsed(res_var): parsed[res_var] = True st = l.from_socket.type if st ==", "'ATTRIBUTE': if socket == node.outputs[0]: # Color con.add_elem('col', 'short4norm') #", "# Object return 'mposition' elif socket == node.outputs[4]: # Camera", "== 'VECTOR': return res_var else: # VALUE return 'vec3({0})'.format(res_var) else:", "tex_store = store_var_name(node) # Pink color for missing texture curshader.write('vec4", "g = blackbody_table_g[i] b = blackbody_table_b[i] t_inv = 1.0 /", "= False if to_linear: curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store)) 
return tex_store", "node.space #map = node.uv_map # Color parse_normal_map_color_input(node.inputs[1], node.inputs[0]) return None", "rgb[1], rgb[2]]) elif node.type == 'VALTORGB': # ColorRamp fac =", "op == 'ADD': return '({0} + {1})'.format(vec1, vec2) elif op", "False if to_linear: curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store)) return tex_store def", "(1.0 - {1})))'.format(dotnv, blend) elif node.type == 'LIGHT_PATH': if socket", "# View Distance else: curshader.add_uniform('vec3 eye', link='_cameraPosition') return 'distance(eye, wposition)'", "View Distance else: curshader.add_uniform('vec3 eye', link='_cameraPosition') return 'distance(eye, wposition)' elif", "Map node with Armory PBR, connect Image Texture directly') parse_normal_map_color_input(node.inputs[5])", "curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name)) return nn else: return to_vec1(node.outputs[0].default_value) elif node.type", "0.5)'.format(opac1, opac2) elif node.type == 'BSDF_PRINCIPLED': if parse_surface: write_normal(node.inputs[19]) out_basecol", "node.type == 'TEX_POINTDENSITY': return '0.0' elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi)", "{0} = {1};'.format(res_var, res)) elif st == 'VALUE': res =", "== 'NEW_GEOMETRY': if socket == node.outputs[0]: # Position return 'wposition'", "return '1.0' elif node.type == 'NORMAL': nor = parse_vector_input(node.inputs[0]) return", "_vert frag = _frag geom = _geom tesc = _tesc", "{2}, {3}))'.format(out, location[0], location[1], location[2]) # use Extension parameter from", "tex_name, matname=None): tex = {} tex['name'] = tex_name image =", "parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves = node.mapping.curves name = node_name(node.name)", "return parse_vector_input(inp) elif inp.type == 'VALUE': return parse_value_input(inp) def parse_shader_input(inp):", "{1})'.format(val1, val2) out_val = 'mod({0}, {1})'.format(val1, val2) 
elif op ==", "= '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_VELVET': if parse_surface:", "parse_vector(node, socket): global particle_info global sample_bump global sample_bump_res # RGB", "if location[0] != 0.0 or location[1] != 0.0 or location[2]", "con, vert, frag, geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False):", "axis for now.. a = rotation[2] # x * cos(theta)", "return None ext = s[1].lower() do_convert = ext not in", "Mipmap anisotropic tex['min_filter'] = 'anisotropic' tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] =", "if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_specular", "# VALUE return res_var else: if mat_batch() and inp.is_uniform: return", "== 'ROUND': # out_val = 'round({0})'.format(val1) out_val = 'floor({0} +", "- {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3])) curshader.write('{0}_fh1 *= ({1}) *", "socket) elif node.type == 'VERTEX_COLOR': con.add_elem('col', 'short4norm') # Vcols only", "sample_bump_res != '': if node.invert: ext = ['1', '2', '3',", "Vector con.add_elem('tex', 'short2norm') # UVMaps only for now mat =", "= 'tex_magic({0} * {1} * 4.0)'.format(co, scale) if sample_bump: write_bump(node,", "{4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2])) # Get index fac_var =", "# Ray Depth return '0.0' elif socket == node.outputs[9]: #", "scl)) sample_bump = False def to_vec1(v): return str(v) def to_vec3(v):", "+ vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_3", "return parse_group(node, socket) elif node.type == 'GROUP_INPUT': return parse_group_input(node, socket)", "* (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var, index_var,", "mix # out_col = '({0} + {2} * (2.0 *", "'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 /", 
"elif socket == node.outputs[5]: # Parametric return 'mposition' elif node.type", "return res elif node.type == 'MAPPING': out = parse_vector_input(node.inputs[0]) scale", "parsed[res_var] = True st = l.from_socket.type if st == 'RGB'", "f = '0.0' elif grad == 'DIAGONAL': f = '({0}.x", "'TEX_SKY': # Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type", "emi2 = parse_shader_input(node.inputs[2]) if parse_surface: out_basecol = '({0} * {3}", "= '({0} * {3} + {1} * {2})'.format(bc1, bc2, fac_var,", "len(lays) > 1 and node.attribute_name == lays[1].name: con.add_elem('tex1', 'short2norm') return", "op == 'SUBTRACT': out_val = '({0} - {1})'.format(val1, val2) elif", "elif node.type == 'HAIR_INFO': # Is Strand # Intercept #", "if not os.path.isfile(unpack_filepath): fmt = 'PNG' if new_ext == 'png'", "parse_vector_input(node.inputs[1]) # subsurface_radius = parse_vector_input(node.inputs[2]) # subsurface_color = parse_vector_input(node.inputs[3]) out_metallic", "'RGB' or st == 'RGBA' or st == 'VECTOR': return", "{0} + {2} * ((vec3(1.0) - {0}) * {1} *", "parsed res_var = res_var_name(l.from_node, l.from_socket) # Unparsed node if not", "Occlude out_occlusion = '0.0' elif node.type == 'BSDF_REFRACTION': # write_normal(node.inputs[3])", "elif node.type == 'ADD_SHADER': bc1, rough1, met1, occ1, spec1, opac1,", "'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) unpack_filepath = os.path.join(unpack_path,", "occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1]) bc2, rough2, met2, occ2,", "l.from_node.type == 'REROUTE': return parse_vector_input(l.from_node.inputs[0]) res_var = write_result(l) st =", "g, b) elif node.type == 'WAVELENGTH': curshader.add_function(c_functions.str_wavelength_to_rgb) wl = parse_value_input(node.inputs[0])", "inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_displacement_input(l.from_node.inputs[0])", "* {3} + {1} * {2})'.format(spec1, spec2, fac_var, 
fac_inv_var) out_emission", "# a = node.rotation[1] # out = 'vec3({0}.x * {1}", "required for image name') return None ext = s[1].lower() do_convert", "node.type == 'TEX_MUSGRAVE': # Fall back to noise curshader.add_function(c_functions.str_tex_musgrave) if", "mat in mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): #", "uv_name = 'texCoord' triplanar = node.projection == 'BOX' if triplanar:", "= '1.0' elif node.type == 'AMBIENT_OCCLUSION': if parse_surface: # Single", "out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0]) if parse_surface: frag.write('basecol =", "'_texread' s = safesrc(s) if '__' in s: # Consecutive", "tex_store def write_bump(node, res, scl=0.001): global sample_bump global sample_bump_res sample_bump_res", "== node.outputs[4]: # Camera return 'vec3(0.0)' # 'vposition' elif socket", "'tex_checker_f({0}, {1})'.format(co, scale) if sample_bump: write_bump(node, res) return res elif", "elif node.type == 'BSDF_PRINCIPLED': if parse_surface: write_normal(node.inputs[19]) out_basecol = parse_vector_input(node.inputs[0])", "_geom tesc = _tesc tese = _tese parse_surface = _parse_surface", "Write cols array cols_var = node_name(node.name) + '_cols' curshader.write('vec3 {0}[{1}];'.format(cols_var,", "'vec3({0}.x * {1} - {0}.z * {2}, {0}.x * {2}", "fac_var, fac_inv_var) out_emission = '({0} * {3} + {1} *", "return 'pow({0}, vec3({1}))'.format(out_col, gamma) elif node.type == 'HUE_SAT': curshader.add_function(c_functions.str_hue_sat) hue", "uv_name)) if sample_bump: sample_bump_res = tex_store curshader.write('float {0}_1 = textureOffset({1},", "pass elif node.type == 'VOLUME_SCATTER': pass return out_basecol, out_roughness, out_metallic,", "# Normal return 'n' elif socket == node.outputs[2]: # UV", "= 'atan({0})'.format(val1) elif op == 'ARCTAN2': out_val = 'atan({0}, {1})'.format(val1,", "arm.utils.asset_path(filepath) texfile = arm.utils.extract_filename(filepath) tex['file'] = 
arm.utils.safestr(texfile) s = tex['file'].rsplit('.',", "elif interpolation == 'Closest': tex['min_filter'] = 'point' tex['mag_filter'] = 'point'", "/ (finish - start)) return 'mix({0}[{1}], {0}[{1} + 1], ({2}", "blend == 'HUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)", "= write_result(l) st = l.from_socket.type if st == 'RGB' or", "Normal if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP': warn(mat_name() + '", "now return '1.0' elif node.type == 'NORMAL': nor = parse_vector_input(node.inputs[0])", "= parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' curshader.write('float {0} =", "get function parts.. ar = res.split('(', 1) pre = ar[0]", "implied. # See the License for the specific language governing", "= 'log({0})'.format(val1) elif op == 'SQRT': out_val = 'sqrt({0})'.format(val1) elif", "== '': if image.packed_file is not None: filepath = './'", "mat in mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): lays", ": 0)'.format(fac_var, elems[i].position) # Write index index_var = node_name(node.name) +", "blend == 'COLOR': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)", "vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_2 =", "node.outputs[2]: return '{0}.z'.format(vec) elif node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0])", "if new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, converted_path, file_format=fmt) arm.assets.add(converted_path)", "packed data / copy non-ascii texture unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled',", "emi1 = parse_shader_input(node.inputs[1]) bc2, rough2, met2, occ2, spec2, opac2, emi2", "op == 'FRACT': out_val = 'fract({0})'.format(val1) elif op == 'MODULO':", "= parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type == 'EMISSION': if", "socket): global particle_info global sample_bump if node.type == 
'GROUP': if", "* t + g[2] rgb[2] = ((b[0] * t +", "col1, col2, col3, scale) if sample_bump: write_bump(node, res) return res", "strength = parse_value_input(node.inputs[0]) # Height multiplier # distance = parse_value_input(node.inputs[1])", "to mix elif blend == 'DODGE': out_col = 'mix({0}, {1},", "= 1.0 / t rgb[0] = r[0] * t_inv +", "co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1])", "import arm.material.mat_state as mat_state import arm.material.cycles_functions as c_functions import shutil", "res elif node.type == 'TEX_POINTDENSITY': # Pass through return to_vec3([0.0,", "= parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res = 'vec3(tex_voronoi({0} *", "+ {1} * 0.5)'.format(rough1, rough2) out_metallic = '({0} * 0.5", "curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float", "parse_surface: write_normal(node.inputs[4]) # Revert to glossy out_basecol = parse_vector_input(node.inputs[0]) out_roughness", "node.type == 'VALTORGB': # ColorRamp fac = parse_value_input(node.inputs[0]) interp =", "# out_col = '({0} + {2} * (2.0 * ({1}", "# Map vector return 'mix({0}[{1}], {0}[{1} + 1], ({2} -", "# Mix color # float f = (pos - start)", "wrd = bpy.data.worlds['Arm'] # Surface if parse_surface or parse_opacity: parsed", "-1.41761141e-02], [-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01], [6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01] ]", "name + '_ys' curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make const", "'(vec3(1.0) - (vec3(1.0 - {2}) + {2} * (vec3(1.0) -", "- {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var) elif node.type == 'CURVE_VEC':", "/ 2.0)'.format(vec1, vec2) elif op == 'DOT_PRODUCT': return 'vec3(dot({0}, {1}))'.format(vec1,", "image path to assets # TODO: Khamake converts .PNG to", "in parents: s = p.name + '_' + s if", "return 
to_uniform(inp) else: return to_vec3(inp.default_value) def parse_vector(node, socket): global particle_info", "(1.0 / (finish - start)) return 'mix({0}[{1}], {0}[{1} + 1],", "+ '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec", "the specific language governing permissions and # limitations under the", "'wtangent' elif socket == node.outputs[3]: # True Normal return 'n'", "socket == node.outputs[0]: # Generated - bounds return 'bposition' elif", "== 'SUBTRACT': out_val = '({0} - {1})'.format(val1, val2) elif op", "== node.outputs[1]: return parse_value_input(node.inputs[7]) else: return None else: return parse_group(node,", "== 'VECTOR': res = parse_vector(l.from_node, l.from_socket) if res == None:", "sample_bump_res sample_bump_res = store_var_name(node) + '_bump' # Testing.. get function", "'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'NORMAL_MAP': if curshader ==", "{0}.z), 0.0)'.format(co) res = '(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node,", "r = parse_value_input(node.inputs[0]) g = parse_value_input(node.inputs[1]) b = parse_value_input(node.inputs[2]) return", "-{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_4 = {1}{2}", "vec + '.z', curves[2].points), fac) elif node.type == 'CURVE_RGB': #", "return if normal_parsed: return normal_parsed = True frag.write_normal += 1", "out = '({0} * vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2])", "'((1.0 - {2}) * {0} + {2} * ((vec3(1.0) -", "= name + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) index", "curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO: Make const for i in", "* {1} : 0.5 / (1.0 - {1})))'.format(dotnv, blend) elif", "out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0]) if parse_surface:", "- {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}])", "? 
1 : 0)'.format(fac_var, elems[i].position) # Write index index_var =", "'SPHERICAL': f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y", "= {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl))", "== 'ATTRIBUTE': # Pass time till drivers are implemented if", "if node.coloring == 'INTENSITY': res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale)", "'BSDF_DIFFUSE': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1])", "{0}))));'.format(col1, col2, fac) elif blend == 'LINEAR_LIGHT': out_col = 'mix({0},", "= parse_value_input(node.inputs[0]) # Height multiplier # distance = parse_value_input(node.inputs[1]) sample_bump", "= ['1', '2', '3', '4'] else: ext = ['2', '1',", "= ')' curshader.write('float {0}_1 = {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res,", "'({0} * {3} + {1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var)", "parse_value_input(node.inputs[1]) return 'pow({0}, vec3({1}))'.format(out_col, gamma) elif node.type == 'HUE_SAT': curshader.add_function(c_functions.str_hue_sat)", "{2};'.format(prefix, fac_var, fac)) curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix, fac_inv_var,", "node.gradient_type if grad == 'LINEAR': f = '{0}.x'.format(co) elif grad", "'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif node.type == 'BUMP': #", "'win': s = filepath.rsplit('.', 1) arm.assets.add(arm.utils.asset_path(s[0] + '.' 
+ s[1].lower()))", "elif(t >= 1167.0): i = 1 else: i = 0", "s = s.replace('_', '_x') return s ## def make_texture(image_node, tex_name,", "elif blend == 'BURN': out_col = 'mix({0}, {1}, {2})'.format(col1, col2,", "out_occlusion = '1.0' out_specular = '1.0' out_opacity = '1.0' out_emission", "out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_VELVET': if", "0.5)'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type", "'HUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert", "fac_inv_var) elif node.type == 'ADD_SHADER': bc1, rough1, met1, occ1, spec1,", "index = '0' for i in range(1, len(points)): index +=", "request, cache size instead powimage = is_pow(image.size[0]) and is_pow(image.size[1]) if", "/ 3.0) * 2.5)'.format(col) elif node.type == 'SEPHSV': return '0.0'", "if interp == 'CONSTANT': return '{0}[{1}]'.format(cols_var, index_var) else: # Linear", "to lowercase on windows if arm.utils.get_os() == 'win': s =", "'LAYER_WEIGHT': blend = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))", "and node.image.colorspace_settings.name == 'sRGB' res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear,", "'GROUP': if node.node_tree.name.startswith('Armory PBR'): if parse_surface: # Base color out_basecol", "= 'linear' tex['generate_mipmaps'] = True elif interpolation == 'Smart': #", "out_val = 'acos({0})'.format(val1) elif op == 'ARCTANGENT': out_val = 'atan({0})'.format(val1)", "+ {0}.b * 0.11) / 3.0) * 2.5)'.format(col) elif node.type", "Normal Map node with Armory PBR, connect Image Texture directly')", "= parse_value_input(node.inputs[0]) val2 = parse_value_input(node.inputs[1]) op = node.operation if op", "co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[4])", "1.0 - {0}.y)'.format(uv_name) else: uv_name = 'texCoord' triplanar = 
node.projection", "return '(vec3({0}, {1}, {2}) * {3})'.format(\\ vector_curve(name + '0', vec", "node.type == 'BSDF_VELVET': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness", "else: tex_store = store_var_name(node) # Pink color for missing texture", "'0.0' elif socket == node.outputs[4]: # Size particle_info['size'] = True", "basecol_only=False): output_node = node_by_type(nodes, 'OUTPUT_MATERIAL') if output_node != None: parse_output(output_node,", "'BURN': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert", "{2} * (2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var) if", "0) and num != 0 def is_ascii(s): return len(s) ==", "Slow.. res = 'vec3(tex_noise({0} * {1}), tex_noise({0} * {1} +", "geom, tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only) def parse_output(node, _con,", "{1} + 0.33), tex_noise({0} * {1} + 0.66))'.format(co, scale) if", "== 'RGBA': return parse_vector_input(inp) elif inp.type == 'VECTOR': return parse_vector_input(inp)", "'(1.0 - pow({0}, ({1} < 0.5) ? 
2.0 * {1}", "'asin({0})'.format(val1) elif op == 'ARCCOSINE': out_val = 'acos({0})'.format(val1) elif op", "Ray return '0.0' elif socket == node.outputs[5]: # Is Reflection", "= 'sqrt({0})'.format(val1) elif op == 'ABSOLUTE': out_val = 'abs({0})'.format(val1) elif", "if node.arm_material_param else None if tex != None: curshader.write_textures +=", "else: if inp.type == 'VALUE': # Unlinked reroute return to_vec3([0.0,", "parse_value_input(node.inputs[3]) res = 'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale) if", "blend == 'BURN': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)", "{1} * {0} + {0} * (vec3(1.0) - (vec3(1.0) -", "'VERTEX_COLOR': con.add_elem('col', 'short4norm') # Vcols only for now return 'vcolor'", "{0};'.format(out_metallic)) frag.write('occlusion = {0};'.format(out_occlusion)) frag.write('specular = {0};'.format(out_specular)) if '_Emission' in", "== 'Tessellation' and tese != None: curshader = tese else:", "+ '_facs' curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const for", "node.type == 'VOLUME_ABSORPTION': pass elif node.type == 'VOLUME_SCATTER': pass return", "to mix elif blend == 'SOFT_LIGHT': out_col = '((1.0 -", "0.0, 1.0)'.format(out_val) else: return out_val elif node.type == 'RGBTOBW': col", "'On' else '0.0' elif socket == node.outputs[1]: # Age particle_info['age']", "global sample_bump_res # RGB if node.type == 'GROUP': return parse_group(node,", "'_store' def texture_store(node, tex, tex_name, to_linear=False, tex_link=None): global sample_bump global", "Xs array facs_var = name + '_xs' curshader.write('float {0}[{1}];'.format(facs_var, len(points)))", "res elif node.type == 'LIGHT_FALLOFF': # Constant, linear, quadratic #", "elif grad == 'QUADRATIC': f = '0.0' elif grad ==", "parse_value_input(node.inputs[3]) # Slow.. 
res = 'vec3(tex_noise({0} * {1}), tex_noise({0} *", "> {1})'.format(val1, val2) elif op == 'ROUND': # out_val =", "nn else: return to_vec1(node.outputs[0].default_value) elif node.type == 'WIREFRAME': #node.use_pixel_size #", "i, elems[i].position)) # Mix color # float f = (pos", "{0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_4", "-= 1 return res elif node.image == None: # Empty", "{2}))'.format(h,s,v) elif node.type == 'COMBRGB': r = parse_value_input(node.inputs[0]) g =", "elif node.type == 'SEPHSV': return '0.0' elif node.type == 'SEPRGB':", "None: return None # Get filepath filepath = image.filepath if", "# Revert to mix elif blend == 'DODGE': out_col =", "parse_vector_input(node.inputs[0]) else: co = 'bposition' col1 = parse_vector_input(node.inputs[1]) col2 =", "global parsed tex_store = store_var_name(node) # Pink color for missing", "= blackbody_table_b[i] t_inv = 1.0 / t rgb[0] = r[0]", "tex, tex_name, to_linear=False, tex_link=None): global sample_bump global sample_bump_res global parsed", "& (num - 1)) == 0) and num != 0", "node.inputs[5].links[0].from_node.type == 'NORMAL_MAP': warn(mat_name() + ' - Do not use", "if parse_opacity: if len(node.inputs) > 20: out_opacity = parse_value_input(node.inputs[18]) elif", "return '{0}.x'.format(res_var) else: # VALUE return res_var else: if mat_batch()", "node.type == 'COMBXYZ': x = parse_value_input(node.inputs[0]) y = parse_value_input(node.inputs[1]) z", "# TODO: Blender seems to load full images on size", "elif node.type == 'BSDF_TRANSLUCENT': if parse_surface: write_normal(node.inputs[1]) if parse_opacity: out_opacity", "= False particle_info['size'] = False particle_info['velocity'] = False particle_info['angular_velocity'] =", "Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_SKY':", "return '0.5' elif node.type == 'LAYER_WEIGHT': blend = parse_value_input(node.inputs[0]) if", "else: if not 
os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath): shutil.copy(texpath, unpack_filepath)", "do_convert: unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path):", "under the License. # import math import bpy import os", "socket == node.outputs[1]: # Normal return 'n' if curshader.shader_type ==", "t_inv + r[1] * t + r[2] rgb[1] = g[0]", "# TODO: delete cache when file changes if not os.path.isfile(converted_path):", "== 'OBJECT_INFO': return 'wposition' elif node.type == 'PARTICLE_INFO': if socket", "* n);') con.add_elem('tang', 'short4norm') frag.write_normal -= 1 def parse_value_input(inp): if", "mat_state.material.name def mat_batch(): return mat_state.batch def mat_bind_texture(tex): mat_state.bind_textures.append(tex) def mat_texture_grad():", "for i in range(1, len(elems)): index += ' + ({0}", "{0}_fh1 = {0}_{1} - {0}_{2}; float {0}_fh2 = {0}_{3} -", "else: co = ar[1][:-1] post = ')' curshader.write('float {0}_1 =", "+ '.z', curves[2].points), fac) elif node.type == 'CURVE_RGB': # RGB", "= 0.0 rgb[2] = 0.0 else: if (t >= 6365.0):", "elif node.type == 'BRIGHTCONTRAST': out_col = parse_vector_input(node.inputs[0]) bright = parse_value_input(node.inputs[1])", "return 'objectInfoIndex' elif socket == node.outputs[3]: # Material Index curshader.add_uniform('float", "# transmission_roughness = parse_vector_input(node.inputs[16]) if node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0:", "# Pink color for missing texture curshader.write('vec4 {0} = vec4(1.0,", "'png' else 'JPEG' arm.utils.convert_image(image, unpack_filepath, file_format=fmt) else: # Write bytes", "else: return to_vec3(inp.default_value) def parse_vector(node, socket): global particle_info global sample_bump", "'(vec3((1.0 - {2}) * {0} + {2} * {0} /", "== 'COMBRGB': r = parse_value_input(node.inputs[0]) g = parse_value_input(node.inputs[1]) b =", "parse_surface: 
write_normal(node.inputs[3]) out_roughness = parse_value_input(node.inputs[1]) if parse_opacity: out_opacity = '(1.0", "- {1}), {2})'.format(col1, col2, fac_var) elif blend == 'DARKEN': out_col", "{5}, {6})) * {3})'.format(\\ vector_curve(name + '0', vec + '.x',", "detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'tex_musgrave_f({0}", "return tex def is_pow(num): return ((num & (num - 1))", "- 1.0;'.format(parse_vector_input(inp))) frag.write('texn.y = -texn.y;') frag.add_include('std/normals.glsl') frag.write('mat3 TBN = cotangentFrame(n,", "drivers are implemented if node.attribute_name == 'time': curshader.add_uniform('float time', link='_time')", "'4'] else: ext = ['2', '1', '4', '3'] curshader.write('float {0}_fh1", "const for i in range(0, len(elems)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i,", "'RGBA' or st == 'VECTOR': return res_var else: # VALUE", "node.type == 'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)): return '{0}.a'.format(store_var_name(node))", "res elif node.type == 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked: co =", "for n in nodes: if n.type == ntype: return n", "inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_value_input(l.from_node.inputs[0])", "sample_bump_res # RGB if node.type == 'GROUP': return parse_group(node, socket)", "rpdat.arm_texture_filter if texfilter == 'Anisotropic': interpolation = 'Smart' elif texfilter", "== 'RGBA' or t == 'VECTOR': return 'vec3' else: return", "'0.0' elif grad == 'DIAGONAL': f = '({0}.x + {0}.y)", "== 'SUBSURFACE_SCATTERING': if parse_surface: write_normal(node.inputs[4]) out_basecol = parse_vector_input(node.inputs[0]) elif node.type", "instead powimage = is_pow(image.size[0]) and is_pow(image.size[1]) if interpolation == 'Cubic':", "elif blend == 'OVERLAY': out_col = 'mix({0}, {1}, {2})'.format(col1, col2,", "'BSDF_HAIR': pass elif node.type == 
'HOLDOUT': if parse_surface: # Occlude", "parse_shader_input(node.inputs[0]) if parse_surface: frag.write('basecol = {0};'.format(out_basecol)) frag.write('roughness = {0};'.format(out_roughness)) frag.write('metallic", "in s: # Consecutive _ are reserved s = s.replace('_',", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "rgb[0] = 0.826270103 rgb[1] = 0.994478524 rgb[2] = 1.56626022 elif", "/ {1})'.format(val1, val2) elif op == 'POWER': out_val = 'pow({0},", "l.from_socket) if res == None: return None curshader.write('float {0} =", "node.type == 'NORMAL': nor = parse_vector_input(node.inputs[0]) return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor)", "# VALUE return 'vec3({0})'.format(res_var) else: if inp.type == 'VALUE': #", "= '0.0' out_occlusion = '1.0' out_specular = '1.0' out_opacity =", "to_linear=False, tex_link=None): global sample_bump global sample_bump_res global parsed tex_store =", "'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) filepath = os.path.join(unpack_path, image.name +", "{1})'.format(val1, val2) elif op == 'ROUND': # out_val = 'round({0})'.format(val1)", "# Is Glossy Ray return '1.0' elif socket == node.outputs[4]:", "elif node.type == 'NORMAL': nor = parse_vector_input(node.inputs[0]) return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value),", "only for now mat = mat_get_material() mat_users = mat_get_material_users() if", "# Reflection return 'vec3(0.0)' elif node.type == 'UVMAP': #instance =", "] blackbody_table_g = [ [-7.50343014e+02, 3.15679613e-04, 4.73464526e-01], [-1.00402363e+03, 1.29189794e-04, 9.08181524e-01],", "or os.path.getsize(unpack_filepath) != os.path.getsize(texpath): shutil.copy(texpath, unpack_filepath) arm.assets.add(unpack_filepath) else: if not", "if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): if parse_surface: #", "image if do_convert: new_ext = 'png' if (ext in ('tga',", "ar = res.split('(', 1) pre = ar[0] + '(' 
if", "geom = _geom tesc = _tesc tese = _tese parse_surface", "if len(node.inputs) > 20: out_opacity = parse_value_input(node.inputs[18]) elif node.type ==", "== 'TEX_ENVIRONMENT': # Pass through return to_vec3([0.0, 0.0, 0.0]) elif", "= parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) #", "'frag' else 'wnormal' elif socket == node.outputs[4]: # Incoming return", "else: # Vector con.add_elem('tex', 'short2norm') # UVMaps only for now", "1.79435860e-07, -2.60561875e-04, -1.41761141e-02], [-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01], [6.72595954e-13, -2.73059993e-08, 4.24068546e-04,", "def parse_value(node, socket): global particle_info global sample_bump if node.type ==", "assets_add(path): arm.assets.add(path) def assets_add_embedded_data(path): arm.assets.add_embedded_data(path) def mat_name(): return mat_state.material.name def", "Khamake converts .PNG to .jpg? Convert ext to lowercase on", "val2) elif op == 'DIVIDE': out_val = '({0} / {1})'.format(val1,", "to_vec3(socket.default_value) elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co =", "= inp.links[0] if l.from_node.type == 'REROUTE': return parse_value_input(l.from_node.inputs[0]) res_var =", "elif op == 'SUBTRACT': return '({0} - {1})'.format(vec1, vec2) elif", "curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2}; float {0}_fh2 = {0}_{3}", "= inp.links[0] if l.from_node.type == 'REROUTE': return parse_vector_input(l.from_node.inputs[0]) res_var =", "= cotangentFrame(n, -vVec, texCoord);') frag.write('n = TBN * normalize(texn);') else:", "== 'BLACKBODY': t = float(parse_value_input(node.inputs[0])) rgb = [0,0,0] blackbody_table_r =", "{1}), tex_noise({0} * {1} + 0.33), tex_noise({0} * {1} +", "elif op == 'ARCCOSINE': out_val = 'acos({0})'.format(val1) elif op ==", "frag.add_include('std/normals.glsl') frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);') frag.write('n 
= TBN", "curshader.write('float {0}_4 = {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co,", "node.vector_type #conv_from = node.convert_from #conv_to = node.convert_to # Pass throuh", "map to cycles - 450 to 600 nanometers return 'wavelength_to_rgb(({0}", "+ vec3({1}, {2}, {3}))'.format(out, location[0], location[1], location[2]) # use Extension", "0.1) return res elif node.type == 'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked:", "the License is distributed on an \"AS IS\" BASIS, #", "size is different or file does not exist yet if", "tex_noise({0} * {1} + 0.66))'.format(co, scale) if sample_bump: write_bump(node, res,", "'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co =", "uv_name = parse_vector_input(node.inputs[0]) uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name) else:", "Reference image name texpath = arm.utils.asset_path(filepath) texfile = arm.utils.extract_filename(filepath) tex['file']", "and node.uv_map == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0 -", "con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)' return 'vec3(texCoord.x,", "+ '_xs' curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO: Make const for", "color # float f = (pos - start) * (1.0", "== 'MULTIPLY': out_col = 'mix({0}, {0} * {1}, {2})'.format(col1, col2,", "'({0} * {3} + {1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var)", "== 'VALUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) #", "== 'DARKEN': out_col = 'min({0}, {1} * {2})'.format(col1, col2, fac_var)", "prefix = '' if node.inputs[0].is_linked else 'const ' fac =", "+ ' - Do not use Normal Map node with", "return res elif node.type == 'TEX_NOISE': curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() + '/armory/Assets/'", "# aniso = 
parse_vector_input(node.inputs[8]) # aniso_rot = parse_vector_input(node.inputs[9]) # sheen", "strength)) curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res)) curshader.write('vec3 {0}_b =", "'point' # else defaults to linear if image_node.extension != 'REPEAT':", "'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) filepath = os.path.join(unpack_path,", "t rgb[0] = r[0] * t_inv + r[1] * t", "node.blend_type if blend == 'MIX': out_col = 'mix({0}, {1}, {2})'.format(col1,", "{tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;') curshader.write(f'if (texCoordBlend.y > 0)", "'linear' tex['generate_mipmaps'] = True elif interpolation == 'Smart': # Mipmap", "parse_opacity, parse_displacement, basecol_only) def parse_output(node, _con, _vert, _frag, _geom, _tesc,", "2.0))) * n)'.format(sample_bump_res) sample_bump_res = '' else: res = 'n'", "socket == node.outputs[3]: # Is Glossy Ray return '1.0' elif", "height = parse_value_input(node.inputs[2]) sample_bump = False nor = parse_vector_input(node.inputs[3]) if", "'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)' return 'vec3(texCoord.x, 1.0 - texCoord.y,", "= '({0} * 0.5 + {1} * 0.5)'.format(met1, met2) out_occlusion", "op = node.operation if op == 'ADD': out_val = '({0}", "this file except in compliance with the License. 
# You", "'floor({0})'.format(val1) elif op == 'CEIL': out_val = 'ceil({0})'.format(val1) elif op", "'1.0' out_opacity = '1.0' out_emission = '0.0' return out_basecol, out_roughness,", "global frag if basecol_only: return if inp.is_linked == False: return", "res elif node.type == 'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)):", "* {2})'.format(bc1, bc2, fac_var, fac_inv_var) out_roughness = '({0} * {3}", "curshader.write('float {0}_1 = {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co,", "parse_vector_input(node.inputs[3]) return '(vec3({0}) * {1})'.format(height, scale) def parse_normal_map_color_input(inp, strength_input=None): global", "y * sin(theta) # x * sin(theta) + y *", "return nn else: return to_vec1(node.outputs[0].default_value) elif node.type == 'WIREFRAME': #node.use_pixel_size", "{1})'.format(out_col, fac) elif node.type == 'MIX_RGB': fac = parse_value_input(node.inputs[0]) fac_var", "node.type == 'VALTORGB': # ColorRamp return '1.0' elif node.type ==", "curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) return '{0}.a'.format(tex_store) elif", "node.coloring == 'INTENSITY': res = 'tex_voronoi({0} * {1}).a'.format(co, scale) else:", "== 'VALTORGB': # ColorRamp return '1.0' elif node.type == 'MATH':", "image.source == \"GENERATED\": unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if", "{} parents = [] normal_parsed = False rpdat = arm.utils.get_rp()", "+ image.name has_ext = filepath.endswith(('.jpg', '.png', '.hdr')) if not has_ext:", "file_format=fmt) else: # Write bytes if size is different or", "< {1})'.format(val1, val2) elif op == 'GREATER_THAN': out_val = 'float({0}", "* 0.5)'.format(spec1, spec2) out_emission = '({0} * 0.5 + {1}", "# specular_tint = parse_vector_input(node.inputs[6]) out_roughness = parse_value_input(node.inputs[7]) # aniso =", "'({0} * 0.5 + {1} * 0.5)'.format(occ1, occ2) out_specular =", "Ray return '1.0' elif socket == 
node.outputs[4]: # Is Singular", "= parse_vector_input(node.inputs[0]) + '.r' elif node.type == 'BSDF_ANISOTROPIC': if parse_surface:", "'p_age' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket ==", "dotnv) elif node.type == 'NEW_GEOMETRY': if socket == node.outputs[6]: #", "== 'RGBA' or st == 'VECTOR': return res_var else: #", "'_' + s if curshader.write_textures > 0: s += '_texread'", "!= 1.0 or scale[2] != 1.0: out = '({0} *", "# mapping.curves[0].points[0].handle_type # bezier curve return '(vec3({0}, {1}, {2}) *", "None: return inp = output_node.inputs[index] parents.append(node) out_group = parse_input(inp) parents.pop()", "# Random curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom') return 'objectInfoRandom' elif node.type ==", "return res elif node.type == 'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co", "return to_vec3(inp.default_value) def parse_vector(node, socket): global particle_info global sample_bump global", "{uv_name}1.xy) * texCoordBlend.y;') curshader.write(f'if (texCoordBlend.z > 0) {tex_store} += texture({tex_name},", "res) return res elif node.type == 'TEX_IMAGE': # Already fetched", "parents = [] normal_parsed = False curshader = frag out_basecol,", "= store_var_name(node) # Pink color for missing texture parsed[tex_store] =", "back to noise curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else:", "== 'POWER': out_val = 'pow({0}, {1})'.format(val1, val2) elif op ==", "# x * sin(theta) + y * cos(theta) out =", "f: f.write(image.packed_file.data) # Copy non-ascii texture else: if not os.path.isfile(unpack_filepath)", "= parse_vector_input(node.inputs[8]) # aniso_rot = parse_vector_input(node.inputs[9]) # sheen = parse_vector_input(node.inputs[10])", "== 'BSDF_VELVET': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness 
=", "ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_4 = textureOffset({1}, {2}.xy, ivec2(0,", "return '{0}.r'.format(col) elif socket == node.outputs[1]: return '{0}.g'.format(col) elif socket", "if socket == node.outputs[0]: return to_vec3(node.outputs[0].default_value) elif socket == node.outputs[1]:", "== node.outputs[1]: # Age particle_info['age'] = True return 'p_age' if", "elif socket == node.outputs[1]: return '{0}.y'.format(vec) elif socket == node.outputs[2]:", "# Height multiplier # distance = parse_value_input(node.inputs[1]) sample_bump = True", "to_vec3([rgb[0], rgb[1], rgb[2]]) elif node.type == 'VALTORGB': # ColorRamp fac", "== 'RADIAL': f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co)", "parse_vector_input(inp) if normal_res != None: curshader.write('n = {0};'.format(normal_res)) def is_parsed(s):", "curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name)) return nn else: return to_vec3(socket.default_value) elif node.type", "opac2, emi2 = parse_shader_input(node.inputs[1]) if parse_surface: out_basecol = '({0} +", "Revert to mix elif blend == 'SATURATION': out_col = 'mix({0},", "node.type == 'TEX_SKY': # Pass through return to_vec3([0.0, 0.0, 0.0])", "fac_var) # Revert to mix elif blend == 'DODGE': out_col", "== 'SATURATION': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) #", "in range(0, len(node.outputs)): if node.outputs[i] == socket: return i def", "== node.outputs[4]: # Random curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom') return 'objectInfoRandom' elif", "return '0.0' ## def vector_curve(name, fac, points): # Write Ys", "tex_name, uv_name)) curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name,", "= 'linear' tex['generate_mipmaps'] = True elif interpolation == 'Closest': tex['min_filter']", "return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr) elif node.type == 'GAMMA':", "/ (1.0 - {0}), 
{1})'.format(blend, dotnv) elif socket == node.outputs[1]:", "out_val = '({0} + {1})'.format(val1, val2) elif op == 'SUBTRACT':", "pass elif node.type == 'BSDF_TRANSLUCENT': if parse_surface: write_normal(node.inputs[1]) if parse_opacity:", "'.r' elif node.type == 'BSDF_ANISOTROPIC': if parse_surface: write_normal(node.inputs[4]) # Revert", "{0}.g * 0.59 + {0}.b * 0.11) / 3.0) *", "'Tessellation' and tese != None: curshader = tese else: curshader", "socket == node.outputs[9]: # Transparent Depth return '0.0' elif socket", "scale = parse_value_input(node.inputs[1]) res = 'tex_wave_f({0} * {1})'.format(co, scale) if", "# Leaving group inp = parent.inputs[index] res = parse_input(inp) parents.append(parent)", "{1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float", "True, tex_link=tex_link)) else: tex_store = store_var_name(node) # Pink color for", "con, vert, frag, geom, tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only)", "return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0", "= '({0} * 0.5 + {1} * 0.5)'.format(occ1, occ2) out_specular", "parse_opacity: parsed = {} parents = [] normal_parsed = False", "({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) if strength_input != None: strength", "1) pre = ar[0] + '(' if ',' in ar[1]:", "# Vector elif node.type == 'CAMERA': # View Vector in", "True curshader.write_textures += 1 curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0,", "node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else:", "+ {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co) res", "write_normal(node.inputs[19]) out_basecol = parse_vector_input(node.inputs[0]) # subsurface = parse_vector_input(node.inputs[1]) # subsurface_radius", "= parse_input(inp) parents.pop() return out_group def parse_group_input(node, socket): index =", "elif inp.type == 'VALUE': return parse_value_input(inp) def 
parse_shader_input(inp): if inp.is_linked:", "'CONSTANT': return '{0}[{1}]'.format(cols_var, index_var) else: # Linear # Write facs", "if node.invert: ext = ['1', '2', '3', '4'] else: ext", "import arm.log import arm.material.mat_state as mat_state import arm.material.cycles_functions as c_functions", "6.41423749e-01], [4.66849800e+03, 2.85655028e-05, 1.29075375e-01], [4.60124770e+03, 2.89727618e-05, 1.48001316e-01], [3.78765709e+03, 9.36026367e-06, 3.98995841e-01]", "sample_bump: write_bump(node, res) return res elif node.type == 'BRIGHTCONTRAST': out_col", "tex_name = safesrc(node.name) tex = make_texture(node, tex_name) tex_link = node.name", "0: s += '_texread' s = safesrc(s) if '__' in", "parse_vector_input(node.inputs[0]) else: co = 'bposition' grad = node.gradient_type if grad", "== 'BSDF_TRANSLUCENT': if parse_surface: write_normal(node.inputs[1]) if parse_opacity: out_opacity = '(1.0", "curshader.write('vec3 disp = {0};'.format(out_disp)) def parse_group(node, socket): # Entering group", "!= 0.0: out_emission = parse_value_input(node.inputs[6]) emission_found = True if parse_opacity:", "node.type == 'HOLDOUT': if parse_surface: # Occlude out_occlusion = '0.0'", "if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' grad", "op == 'MAXIMUM': out_val = 'max({0}, {1})'.format(val1, val2) elif op", "op == 'CEIL': out_val = 'ceil({0})'.format(val1) elif op == 'FRACT':", "particle_info global sample_bump global sample_bump_res con = _con vert =", "== 'Point': interpolation = 'Closest' # TODO: Blender seems to", "if sample_bump: write_bump(node, res) return res elif node.type == 'BRIGHTCONTRAST':", "== 'WIREFRAME': #node.use_pixel_size # size = parse_value_input(node.inputs[0]) return '0.0' elif", "'VECTOR': return parse_vector_input(inp) elif inp.type == 'VALUE': return parse_value_input(inp) def", "if curshader.shader_type == 'frag' else 'wnormal' elif socket == node.outputs[4]:", "parse_value_input(node.inputs[4]) res = 
'tex_brick_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node,", "{2})'.format(r, g, b) elif node.type == 'WAVELENGTH': curshader.add_function(c_functions.str_wavelength_to_rgb) wl =", "Strand # Intercept # Thickness return '0.5' elif node.type ==", "curshader = frag out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission", "-= 1 return '{0}.rgb'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if", "= parse_value_input(node.inputs[1]) sample_bump = True height = parse_value_input(node.inputs[2]) sample_bump =", "elif node.type == 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "#instance = node.from_instance con.add_elem('tex', 'short2norm') mat = mat_get_material() mat_users =", "1 res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link)) curshader.write_textures -= 1", "License, Version 2.0 (the \"License\"); # you may not use", "tex['file'] = '' return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link)) else:", "Roughly map to cycles - 450 to 600 nanometers return", "(t >= 12000): rgb[0] = 0.826270103 rgb[1] = 0.994478524 rgb[2]", "if op == 'ADD': out_val = '({0} + {1})'.format(val1, val2)", "out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_shader(node, socket): global emission_found", "'Closest' # TODO: Blender seems to load full images on", "image_node.interpolation rpdat = arm.utils.get_rp() texfilter = rpdat.arm_texture_filter if texfilter ==", "= 'vec3(tex_wave_f({0} * {1}))'.format(co, scale) if sample_bump: write_bump(node, res) return", "!= 'RGBA32': # tex['format'] = image_format interpolation = image_node.interpolation rpdat", "parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) blend = 
node.blend_type if blend ==", "- vec3(0.5))))'.format(col1, col2, fac_var) if node.use_clamp: return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col)", "? 1 : 0)'.format(fac_var, points[i].location[0]) # Write index index_var =", "elif node.type == 'COMBHSV': curshader.add_function(c_functions.str_hue_sat) h = parse_value_input(node.inputs[0]) s =", "to cycles - 450 to 600 nanometers return 'wavelength_to_rgb(({0} -", "col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) scale = parse_value_input(node.inputs[3]) res", "{1})'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'VALTORGB': # ColorRamp return '1.0'", "tex['generate_mipmaps'] = False return tex def is_pow(num): return ((num &", "store_var_name(node) if is_parsed(tex_store): return tex_store parsed[tex_store] = True mat_bind_texture(tex) con.add_elem('tex',", "elif socket == node.outputs[2]: return '{0}.b'.format(col) elif node.type == 'SEPXYZ':", "'3a', vec + '.x', curves[3].points), vector_curve(name + '3b', vec +", "+ image.name + ' - invalid file path') return None", "l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_vector_input(l.from_node.inputs[0]) res_var", "Distance else: curshader.add_uniform('vec3 eye', link='_cameraPosition') return 'distance(eye, wposition)' elif node.type", "# UVMaps only for now mat = mat_get_material() mat_users =", "if op == 'ADD': return '({0} + {1})'.format(vec1, vec2) elif", "== 'BOX' if triplanar: curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1", "== 'MOVIE': tex['source'] = 'movie' tex['min_filter'] = 'linear' tex['mag_filter'] =", "assets # TODO: Khamake converts .PNG to .jpg? 
Convert ext", "fac,\\ vector_curve(name + '3a', vec + '.x', curves[3].points), vector_curve(name +", "parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_specular =", "arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket == node.outputs[6]: #", "return 'vec3(0.0)' # Tangent Normal elif node.type == 'OBJECT_INFO': return", "grad == 'RADIAL': f = 'atan({0}.y, {0}.x) / PI2 +", "'vec3(tex_wave_f({0} * {1}))'.format(co, scale) if sample_bump: write_bump(node, res) return res", "# CELLS res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale) if sample_bump:", "elif socket == node.outputs[2]: # Tangent return 'wtangent' elif socket", "elif socket == node.outputs[4]: # Camera return 'vec3(0.0)' # 'vposition'", "{1} * 0.5)'.format(emi1, emi2) if parse_opacity: out_opacity = '({0} *", "frag.write('roughness = {0};'.format(out_roughness)) frag.write('metallic = {0};'.format(out_metallic)) frag.write('occlusion = {0};'.format(out_occlusion)) frag.write('specular", "def get_sdk_path(): return arm.utils.get_sdk_path() def disp_enabled(): return arm.utils.disp_enabled(arm.make_state.target) def warn(text):", "* {1}).rgb'.format(co, scale) if sample_bump: write_bump(node, res) return res elif", "parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(r, g, b) elif node.type ==", "inp.links[0].from_node.type != 'GROUP_INPUT': normal_res = parse_vector_input(inp) if normal_res != None:", "new_ext if image.packed_file is not None or not is_ascii(texfile): #", "= 'linear' tex['mipmap_filter'] = 'no' tex['generate_mipmaps'] = False return tex", "def mat_bind_texture(tex): mat_state.bind_textures.append(tex) def mat_texture_grad(): return mat_state.texture_grad def mat_get_material(): return", "* {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) if location[0] != 0.0 or", "= 'vec3({0}.x * {1} - {0}.z * {2}, {0}.x *", "'({0} * {3} + {1} * {2})'.format(emi1, emi2, 
fac_var, fac_inv_var)", "'PARTICLE_INFO': if socket == node.outputs[3]: # Location particle_info['location'] = True", "# Size particle_info['size'] = True return '1.0' elif node.type ==", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "val2) elif op == 'MULTIPLY': out_val = '({0} * {1})'.format(val1,", "-8.61949938e-05, 6.41423749e-01], [4.66849800e+03, 2.85655028e-05, 1.29075375e-01], [4.60124770e+03, 2.89727618e-05, 1.48001316e-01], [3.78765709e+03, 9.36026367e-06,", "inp.is_uniform: return to_uniform(inp) else: return to_vec3(inp.default_value) def parse_vector(node, socket): global", "are implemented if node.attribute_name == 'time': curshader.add_uniform('float time', link='_time') return", "if inp.is_linked == False: return if normal_parsed: return normal_parsed =", "{0};'.format(strength)) frag.write('n = normalize(TBN * n);') con.add_elem('tang', 'short4norm') frag.write_normal -=", "module builds upon Cycles nodes work licensed as # Copyright", "node.type == 'VALUE': if node.arm_material_param: nn = 'param_' + node_name(node.name)", "elif blend == 'SUBTRACT': out_col = 'mix({0}, {0} - {1},", "= image_node.image if matname is None: matname = mat_state.material.name if", "referenced if len(lays) > 1 and node.uv_map == lays[1].name: con.add_elem('tex1',", "{1})'.format(vec1, vec2) elif op == 'NORMALIZE': return 'normalize({0})'.format(vec1) elif node.type", "= 'tex_checker_f({0}, {1})'.format(co, scale) if sample_bump: write_bump(node, res) return res", "parse_value_input(node.inputs[2]) nor = parse_vector_input(node.inputs[3]) return '(vec3({0}) * {1})'.format(height, scale) def", "elif socket == node.outputs[4]: # Is Singular Ray return '0.0'", "'VECTOR': return 'vec3' else: return 'float' def to_uniform(inp): uname =", "# Pointiness return '0.0' elif node.type == 'HAIR_INFO': # Is", "{tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;') curshader.write(f'if (texCoordBlend.z > 0)", "parse_value_input(node.inputs[2]) 
sample_bump = False nor = parse_vector_input(node.inputs[3]) if sample_bump_res !=", "node.outputs[4]: # Camera return 'vec3(0.0)' # 'vposition' elif socket ==", "== node.outputs[2]: return '{0}.z'.format(vec) elif node.type == 'VECT_MATH': vec1 =", "* {3} + {1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var) elif", "= True st = l.from_socket.type if st == 'RGB' or", "ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name)) sample_bump = False if to_linear: curshader.write('{0}.rgb", "4.0)'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res elif", "{1}, {2}))'.format(h,s,v) elif node.type == 'COMBRGB': r = parse_value_input(node.inputs[0]) g", "{0};'.format(out_emission)) if parse_opacity: frag.write('opacity = {0} - 0.0002;'.format(out_opacity)) # Volume", "{2} * {0} / {1}))'.format(col1, col2, fac_var) elif blend ==", "elif node.type == 'NEW_GEOMETRY': if socket == node.outputs[0]: # Position", "clearcoat_rough = parse_vector_input(node.inputs[13]) # ior = parse_vector_input(node.inputs[14]) # transmission =", "(vec3(1.0) - {1}) * (vec3(1.0) - {0}))));'.format(col1, col2, fac) elif", "{0};'.format(out_disp)) def parse_group(node, socket): # Entering group index = socket_index(node,", "= '' return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link)) else: tex_store", "False: return if normal_parsed: return normal_parsed = True frag.write_normal +=", "curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac)) curshader.write('{0}float {1} = 1.0", "file not found(' + filepath + ')') return None if", "co = ar[1][:-1] post = ')' curshader.write('float {0}_1 = {1}{2}", "> {1} ? 
1 : 0)'.format(fac_var, elems[i].position) # Write index", "= node_name(node.name) + '_i' curshader.write('int {0} = {1};'.format(index_var, index)) if", "elif interpolation == 'Smart': # Mipmap anisotropic tex['min_filter'] = 'anisotropic'", "== 'frag' else 'wnormal' elif socket == node.outputs[2]: # Tangent", "if basecol_only: return if inp.is_linked == False: return if normal_parsed:", "{1})'.format(val1, val2) elif op == 'SUBTRACT': out_val = '({0} -", "= 4 elif(t >= 1902.0): i = 3 elif(t >=", "def mat_batch(): return mat_state.batch def mat_bind_texture(tex): mat_state.bind_textures.append(tex) def mat_texture_grad(): return", "vec2) elif op == 'DOT_PRODUCT': return 'vec3(dot({0}, {1}))'.format(vec1, vec2) elif", "node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale =", "time till drivers are implemented if node.attribute_name == 'time': curshader.add_uniform('float", "= parse_value_input(node.inputs[3]) res = 'tex_checker_f({0}, {1})'.format(co, scale) if sample_bump: write_bump(node,", "interpolation == 'Cubic': # Mipmap linear tex['mipmap_filter'] = 'linear' tex['generate_mipmaps']", "{1} * 0.5)'.format(rough1, rough2) out_metallic = '({0} * 0.5 +", "parse_vector_input(node.inputs[13]) # ior = parse_vector_input(node.inputs[14]) # transmission = parse_vector_input(node.inputs[15]) #", "1.0 / t rgb[0] = r[0] * t_inv + r[1]", "node.object #instance = node.from_instance if socket == node.outputs[0]: # Generated", "'MOVIE': tex['source'] = 'movie' tex['min_filter'] = 'linear' tex['mag_filter'] = 'linear'", "Normal return 'n' elif socket == node.outputs[2]: # UV con.add_elem('tex',", "met2) out_occlusion = '({0} * 0.5 + {1} * 0.5)'.format(occ1,", "' - file extension required for image name') return None", "out_occlusion = '0.0' elif node.type == 'BSDF_REFRACTION': # write_normal(node.inputs[3]) pass", "write_bump(node, res) return res elif node.type == 'TEX_NOISE': curshader.add_function(c_functions.str_tex_noise) 
assets_add(get_sdk_path()", "elif socket == node.outputs[4]: # Incoming return 'vVec' elif socket", "image_format interpolation = image_node.interpolation rpdat = arm.utils.get_rp() texfilter = rpdat.arm_texture_filter", "{3} + {1} * {2})'.format(emi1, emi2, fac_var, fac_inv_var) if parse_opacity:", "== 'DIVIDE': out_val = '({0} / {1})'.format(val1, val2) elif op", "return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path()", "vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_4 =", "TODO: Blender seems to load full images on size request,", "out_roughness = '1.0' out_metallic = '1.0' elif node.type == 'VOLUME_ABSORPTION':", "scale = parse_value_input(node.inputs[3]) res = 'tex_checker_f({0}, {1})'.format(co, scale) if sample_bump:", "'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1]) # if node.use_max: #", "('tga', 'dds')) else 'jpg' tex['file'] = tex['file'].rsplit('.', 1)[0] + '.'", "socket == node.outputs[5]: # Window return 'vec3(0.0)' # 'wvpposition' elif", "'movie' tex['min_filter'] = 'linear' tex['mag_filter'] = 'linear' tex['mipmap_filter'] = 'no'", "socket_index(node, socket): for i in range(0, len(node.outputs)): if node.outputs[i] ==", "normal_parsed = False curshader = frag out_basecol, out_roughness, out_metallic, out_occlusion,", "return '0.0' elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co", "lowercase on windows if arm.utils.get_os() == 'win': s = filepath.rsplit('.',", "= 'texCoord' triplanar = node.projection == 'BOX' if triplanar: curshader.write(f'vec3", "if (t >= 6365.0): i = 5 elif(t >= 3315.0):", "return 'time' else: return '0.0' elif node.type == 'CAMERA': #", "= '({0} + {1})'.format(bc1, bc2) out_roughness = '({0} * 0.5", "ar[1].split(',', 1) co = ar2[0] post = ',' + ar2[1]", "nor = parse_vector_input(node.inputs[3]) return 
'(vec3({0}) * {1})'.format(height, scale) def parse_normal_map_color_input(inp,", "map already parsed, return elif l.from_node.type == 'NORMAL_MAP': return None", "'LIGHTEN': out_col = 'max({0}, {1} * {2})'.format(col1, col2, fac_var) elif", "+ '.r' elif node.type == 'BSDF_ANISOTROPIC': if parse_surface: write_normal(node.inputs[4]) #", "node.type == 'BSDF_TRANSPARENT': if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))", "- start) * (1.0 / (finish - start)) return 'mix({0}[{1}],", "st == 'RGBA' or st == 'VECTOR': return res_var else:", "== node.outputs[6]: # Angular Velocity particle_info['angular_velocity'] = True return 'vec3(0.0)'", "else: #space = node.space #map = node.uv_map # Color parse_normal_map_color_input(node.inputs[1],", "def safesrc(name): return arm.utils.safesrc(name) def get_sdk_path(): return arm.utils.get_sdk_path() def disp_enabled():", "= False sample_bump = False sample_bump_res = '' wrd =", "or st == 'VECTOR': return res_var else: # VALUE return", "== 'BSDF_TOON': # write_normal(node.inputs[3]) pass elif node.type == 'BSDF_TRANSLUCENT': if", "'vcolor' else: # Vector con.add_elem('tex', 'short2norm') # UVMaps only for", "index = socket_index(node, socket) output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT') if output_node", "co, post, scl)) curshader.write('float {0}_2 = {1}{2} + vec3({4}, 0.0,", "'vec3' else: return 'float' def to_uniform(inp): uname = safesrc(inp.node.name) +", "== 'TEX_POINTDENSITY': return '0.0' elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path()", "= store_var_name(node) if is_parsed(tex_store): return tex_store parsed[tex_store] = True mat_bind_texture(tex)", "if do_convert: new_ext = 'png' if (ext in ('tga', 'dds'))", "out_metallic = '({0} * 0.5 + {1} * 0.5)'.format(met1, met2)", "out_basecol = parse_vector_input(node.inputs[0]) out_roughness = '1.0' out_metallic = '1.0' elif", "{1} * 0.5)'.format(spec1, 
spec2) out_emission = '({0} * 0.5 +", "'LIGHT_PATH': if socket == node.outputs[0]: # Is Camera Ray return", "out_col = 'max({0}, {1} * {2})'.format(col1, col2, fac_var) elif blend", "== 'RGB' or st == 'RGBA' or st == 'VECTOR':", "- y * sin(theta) # x * sin(theta) + y", "frag.write('metallic = {0};'.format(out_metallic)) frag.write('occlusion = {0};'.format(out_occlusion)) frag.write('specular = {0};'.format(out_specular)) if", "> {1} ? 1 : 0)'.format(fac_var, points[i].location[0]) # Write index", "write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = '1.0' out_metallic = '1.0'", "res elif node.type == 'TEX_ENVIRONMENT': # Pass through return to_vec3([0.0,", "+ {1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var) out_metallic = '({0}", "parsed tex_store = store_var_name(node) if is_parsed(tex_store): return tex_store parsed[tex_store] =", "curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj') return 'linearize(gl_FragCoord.z, cameraProj)' # View Distance else:", "i in range(0, len(node.outputs)): if node.outputs[i] == socket: return i", "return res elif node.image == None: # Empty texture tex", "parents.pop() # Leaving group inp = parent.inputs[index] res = parse_input(inp)", "sheen = parse_vector_input(node.inputs[10]) # sheen_tint = parse_vector_input(node.inputs[11]) # clearcoat =", "#conv_to = node.convert_to # Pass throuh return parse_vector_input(node.inputs[0]) elif node.type", "frag.write('n = TBN * normalize(texn);') else: frag.write('vec3 n = ({0})", "write_normal(node.inputs[3]) pass elif node.type == 'SUBSURFACE_SCATTERING': if parse_surface: write_normal(node.inputs[4]) out_basecol", "# Mipmap linear tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True elif", "'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col) else: return out_col elif node.type == 'BLACKBODY':", "return 'vVecCam' elif node.type == 'NEW_GEOMETRY': if socket == node.outputs[0]:", "sin(theta) # x * sin(theta) + y * 
cos(theta) out", "= 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale) if sample_bump: write_bump(node,", "# Interpolation strength strength = parse_value_input(node.inputs[0]) # Height multiplier #", "writing, software # distributed under the License is distributed on", "spec2, opac2, emi2 = parse_shader_input(node.inputs[1]) if parse_surface: out_basecol = '({0}", "= tex_name image = image_node.image if matname is None: matname", "node.convert_to # Pass throuh return parse_vector_input(node.inputs[0]) elif node.type == 'COMBXYZ':", "return s ## def make_texture(image_node, tex_name, matname=None): tex = {}", "res = 'vec3(tex_noise({0} * {1}), tex_noise({0} * {1} + 0.33),", "scale = parse_value_input(node.inputs[1]) res = 'tex_magic_f({0} * {1} * 4.0)'.format(co,", "{2}, {0}.y * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a),", "= node_name(node.name) tex = make_texture(node, tex_name) tex_link = node.name if", "curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition'", "has_ext: # Raw bytes, write converted .jpg to /unpacked filepath", "out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic = '1.0' elif", "out_val = 'sin({0})'.format(val1) elif op == 'COSINE': out_val = 'cos({0})'.format(val1)", "b[2]) * t + b[3] # Pass constant return to_vec3([rgb[0],", "450 to 600 nanometers return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl)", "'(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_HAIR': pass elif node.type", "len(s) == 1: arm.log.warn(matname + '/' + image.name + '", "node.type == 'HUE_SAT': curshader.add_function(c_functions.str_hue_sat) hue = parse_value_input(node.inputs[0]) sat = parse_value_input(node.inputs[1])", "'Smart' elif texfilter == 'Linear': interpolation = 'Linear' elif texfilter", "parse_shader_input(node.inputs[1]) bc2, rough2, met2, occ2, spec2, opac2, 
emi2 = parse_shader_input(node.inputs[2])", "== 'BSDF_GLASS': if parse_surface: write_normal(node.inputs[3]) out_roughness = parse_value_input(node.inputs[1]) if parse_opacity:", "nodes work licensed as # Copyright 2011-2013 Blender Foundation #", "else: curshader = vert out_disp = parse_displacement_input(node.inputs[2]) curshader.write('vec3 disp =", "# a = node.rotation[0] # out = 'vec3({0}.y * {1}", "Is Diffuse Ray return '1.0' elif socket == node.outputs[3]: #", "# Raw bytes, write converted .jpg to /unpacked filepath +=", "* 2.5)'.format(col) elif node.type == 'SEPHSV': return '0.0' elif node.type", "return '(vec3({0}) * {1})'.format(height, scale) def parse_normal_map_color_input(inp, strength_input=None): global normal_parsed", "== 'ADD': return '({0} + {1})'.format(vec1, vec2) elif op ==", "# Fall back to noise curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co =", "+ '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') curshader.add_function(c_functions.str_tex_noise) if", "= '({0} * {3} + {1} * {2})'.format(opac1, opac2, fac_var,", "# if node.rotation[1] != 0.0: # a = node.rotation[1] #", "os.path.getsize(texpath): shutil.copy(texpath, unpack_filepath) arm.assets.add(unpack_filepath) else: if not os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material '", "file does not exist yet if image.packed_file is not None:", "> 0) {tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;') curshader.write(f'if (texCoordBlend.y", "{0} - {1}, {2})'.format(col1, col2, fac_var) elif blend == 'SCREEN':", "# Particle info export def parse(nodes, con, vert, frag, geom,", "and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP': warn(mat_name() + ' - Do not", "'1.0' elif node.type == 'EMISSION': if parse_surface: # Multiply basecol", "= '({0} * {3} + {1} * {2})'.format(emi1, emi2, fac_var,", "cotangentFrame(n, -vVec, 
texCoord);') frag.write('n = TBN * normalize(texn);') else: frag.write('vec3", "+= 1 curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) curshader.write_textures", "def parse_shader(node, socket): global emission_found out_basecol = 'vec3(0.8)' out_roughness =", "= parse_vector_input(node.inputs[0]) return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'NORMAL_MAP':", "particle_info = None # Particle info export def parse(nodes, con,", "do_convert: new_ext = 'png' if (ext in ('tga', 'dds')) else", "if node.use_max: # out = 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0],", "- 1.0;'.format(parse_vector_input(inp))) if strength_input != None: strength = parse_value_input(strength_input) if", "= ((b[0] * t + b[1]) * t + b[2])", "n);') con.add_elem('tang', 'short4norm') frag.write_normal -= 1 def parse_value_input(inp): if inp.is_linked:", "# use Extension parameter from the Texture node instead #", "= parse_value_input(node.inputs[6]) emission_found = True if parse_opacity: out_opacity = parse_value_input(node.inputs[1])", "'POWER': out_val = 'pow({0}, {1})'.format(val1, val2) elif op == 'LOGARITHM':", "Convert image if do_convert: new_ext = 'png' if (ext in", "glossy out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic = '1.0'", "'JPEG' arm.utils.convert_image(image, converted_path, file_format=fmt) arm.assets.add(converted_path) else: # Link image path", "== 'DOT_PRODUCT': return 'vec3(dot({0}, {1}))'.format(vec1, vec2) elif op == 'CROSS_PRODUCT':", "# Revert to mix # out_col = '({0} + {2}", "filepath filepath = image.filepath if filepath == '': if image.packed_file", "val2) elif op == 'LOGARITHM': out_val = 'log({0})'.format(val1) elif op", "node.type == 'MAPPING': out = parse_vector_input(node.inputs[0]) scale = node.inputs['Scale'].default_value rotation", "= 'bposition' scale = parse_value_input(node.inputs[4]) res = 
'tex_brick_f({0} * {1})'.format(co,", "'MULTIPLY': out_col = 'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac_var)", "nodes only once global parents global normal_parsed global curshader #", "socket == node.outputs[7]: # Pointiness return '0.0' elif node.type ==", "{1})'.format(val1, val2) elif op == 'LESS_THAN': out_val = 'float({0} <", "# limitations under the License. # import math import bpy", "* 4.0)'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res", "'_i' curshader.write('int {0} = {1};'.format(index_var, index)) if interp == 'CONSTANT':", "'{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link)) else: global parsed tex_store =", "elif socket == node.outputs[2]: return '{0}.z'.format(vec) elif node.type == 'VECT_MATH':", "0) {tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;') else: if mat_texture_grad():", "== 'FRESNEL': curshader.add_function(c_functions.str_fresnel) ior = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv =", "op == 'GREATER_THAN': out_val = 'float({0} > {1})'.format(val1, val2) elif", "mat_texture_grad(): return mat_state.texture_grad def mat_get_material(): return mat_state.material def mat_get_material_users(): return", "'ADD_SHADER': bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0])", "out_emission = '0.0' return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity,", "parse_vector_input(node.inputs[0]) # subsurface = parse_vector_input(node.inputs[1]) # subsurface_radius = parse_vector_input(node.inputs[2]) #", "fac_var) elif blend == 'SUBTRACT': out_col = 'mix({0}, {0} -", "multiplier # distance = parse_value_input(node.inputs[1]) sample_bump = True height =", "# Lifetime particle_info['lifetime'] = True return 'p_lifetime' if arm.utils.get_rp().arm_particles ==", "not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath): 
shutil.copy(texpath, unpack_filepath) arm.assets.add(unpack_filepath) else:", "elif socket == node.outputs[6]: # Is Transmission Ray return '0.0'", "out_specular, out_opacity, out_emission def parse_displacement_input(inp): if inp.is_linked: l = inp.links[0]", "= _parse_opacity basecol_only = _basecol_only emission_found = False particle_info =", "'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'MIX_SHADER': prefix =", "= node_name(node.name) + '_facs' curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make", "ar[1]: ar2 = ar[1].split(',', 1) co = ar2[0] post =", "parse_input(inp) parents.append(parent) # Return to group return res def parse_input(inp):", "parse_vector_input(node.inputs[1]) curves = node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type #", "'bposition' scale = parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res =", "== node.outputs[2]: # Lifetime particle_info['lifetime'] = True return 'p_lifetime' if", "{2})'.format(bc1, bc2, fac_var, fac_inv_var) out_roughness = '({0} * {3} +", "do_convert: if not os.path.isfile(unpack_filepath): fmt = 'PNG' if new_ext ==", "parse_vector_input(node.inputs[0]) return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'NORMAL_MAP': if", "== 'OBJECT_INFO': if socket == node.outputs[2]: # Object Index curshader.add_uniform('float", "start)) return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) *", "if socket == node.outputs[0]: return '{0}.r'.format(col) elif socket == node.outputs[1]:", "if socket == node.outputs[0]: # Fresnel curshader.add_function(c_functions.str_fresnel) return 'fresnel(1.0 /", "== 'On' else '0.0' elif socket == node.outputs[4]: # Size", "if strength != '1.0': frag.write('n.xy *= {0};'.format(strength)) frag.write('n = normalize(TBN", "group inp = parent.inputs[index] res = parse_input(inp) parents.append(parent) # Return", "fac = 
parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' curshader.write('float {0}", "'MATH': val1 = parse_value_input(node.inputs[0]) val2 = parse_value_input(node.inputs[1]) op = node.operation", "# distortion = parse_value_input(node.inputs[3]) res = 'vec3(tex_musgrave_f({0} * {1} *", "os.path.getsize(unpack_filepath) != os.path.getsize(texpath): shutil.copy(texpath, unpack_filepath) arm.assets.add(unpack_filepath) else: if not os.path.isfile(arm.utils.asset_path(filepath)):", "if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_WAVE':", "= 'floor({0})'.format(val1) elif op == 'CEIL': out_val = 'ceil({0})'.format(val1) elif", "# detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) # Slow..", "== None: return inp = output_node.inputs[index] parents.append(node) out_group = parse_input(inp)", "node.outputs[2]: # Tangent return 'wtangent' elif socket == node.outputs[3]: #", "# ior = parse_vector_input(node.inputs[14]) # transmission = parse_vector_input(node.inputs[15]) # transmission_roughness", "- float(gl_FrontFacing))' elif socket == node.outputs[7]: # Pointiness return '0.0'", "return 'normalize({0})'.format(vec1) elif node.type == 'DISPLACEMENT': height = parse_value_input(node.inputs[0]) midlevel", "write_bump(node, res, 0.1) return res elif node.type == 'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave)", "curves[2].points), fac) elif node.type == 'CURVE_RGB': # RGB Curves fac", "_parse_surface parse_opacity = _parse_opacity basecol_only = _basecol_only emission_found = False", "name texpath = arm.utils.asset_path(filepath) texfile = arm.utils.extract_filename(filepath) tex['file'] = arm.utils.safestr(texfile)", "link='{0}'.format(node.name)) return nn else: return to_vec3(socket.default_value) elif node.type == 'TEX_BRICK':", "_parse_surface, _parse_opacity, _parse_displacement, _basecol_only): global parsed # Compute nodes only", "+ {2} * {0} / 
{1}))'.format(col1, col2, fac_var) elif blend", "# RGB if node.type == 'GROUP': return parse_group(node, socket) elif", "ext = ['1', '2', '3', '4'] else: ext = ['2',", "store_var_name(node): return node_name(node.name) + '_store' def texture_store(node, tex, tex_name, to_linear=False,", "= {0}_{1} - {0}_{2}; float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res,", "= 5 elif(t >= 3315.0): i = 4 elif(t >=", "col2, col3, scale) if sample_bump: write_bump(node, res) return res elif", "{2})'.format(rough1, rough2, fac_var, fac_inv_var) out_metallic = '({0} * {3} +", "'TANGENT': out_val = 'tan({0})'.format(val1) elif op == 'ARCSINE': out_val =", "return elif l.from_node.type == 'NORMAL_MAP': return None return res_var def", "in mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): # No", "curshader = tese else: curshader = vert out_disp = parse_displacement_input(node.inputs[2])", "= 0.994478524 rgb[2] = 1.56626022 elif (t < 965.0): rgb[0]", "'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2]) def node_by_type(nodes, ntype): for n", "== 'RGB': return parse_vector_input(inp) elif inp.type == 'RGBA': return parse_vector_input(inp)", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "post = ')' curshader.write('float {0}_1 = {1}{2} + vec3(-{4}, 0.0,", "'VOLUME_SCATTER': pass return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission", "only for now return 'vcolor' elif node.type == 'ATTRIBUTE': if", "mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): # No uvlayers", "{3} + {1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var) out_roughness =", "mat_state.batch def mat_bind_texture(tex): mat_state.bind_textures.append(tex) def mat_texture_grad(): return mat_state.texture_grad def mat_get_material():", "Displacement if _parse_displacement and disp_enabled() and node.inputs[2].is_linked: parsed = {}", "parse_vector_input(node.inputs[3]) scale = 
parse_value_input(node.inputs[4]) res = 'tex_brick({0} * {4}, {1},", "under the Apache License, Version 2.0 (the \"License\"); # you", "parse_surface: write_normal(node.inputs[19]) out_basecol = parse_vector_input(node.inputs[0]) # subsurface = parse_vector_input(node.inputs[1]) #", "snoise256', link='$noise256.png') if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co =", "if node.rotation[1] != 0.0: # a = node.rotation[1] # out", "- {1})))'.format(dotnv, blend) elif node.type == 'LIGHT_PATH': if socket ==", "node.inputs['Location'].enabled else [0.0, 0.0, 0.0] if scale[0] != 1.0 or", "= node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0}, {1},", "l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_displacement_input(l.from_node.inputs[0]) return", "= '' return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link)) else: global", "0.0, 0.0]) elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co =", "return '{0}.rgb'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co", "({0} > {1} ? 1 : 0)'.format(fac_var, elems[i].position) # Write", "open(unpack_filepath, 'wb') as f: f.write(image.packed_file.data) # Copy non-ascii texture else:", "= parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) # Slow.. res =", "'.x', curves[3].points), vector_curve(name + '3b', vec + '.y', curves[3].points), vector_curve(name", "# Occlude out_occlusion = '0.0' elif node.type == 'BSDF_REFRACTION': #", "write_result(l) st = l.from_socket.type if st == 'RGB' or st", "out_val = 'cos({0})'.format(val1) elif op == 'TANGENT': out_val = 'tan({0})'.format(val1)", "+ '_bump' # Testing.. get function parts.. 
ar = res.split('(',", "'.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac)", "curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position)) # Mix color # float", "= mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): # No uvlayers for Curve", "parse_value_input(node.inputs[5]) # specular_tint = parse_vector_input(node.inputs[6]) out_roughness = parse_value_input(node.inputs[7]) # aniso", "texture parsed[tex_store] = True curshader.write_textures += 1 curshader.write('vec4 {0} =", "'SINE': out_val = 'sin({0})'.format(val1) elif op == 'COSINE': out_val =", "emission_found = True emission_strength = parse_value_input(node.inputs[1]) out_basecol = '({0} *", "tex_link=tex_link)) curshader.write_textures -= 1 return res elif node.image == None:", "vec3(0.5))))'.format(col1, col2, fac_var) if node.use_clamp: return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col) else:", "parse_value_input(node.inputs[0]) return '0.0' elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked:", "{0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1}", "spec1, opac1, emi1 = parse_shader_input(node.inputs[0]) bc2, rough2, met2, occ2, spec2,", "'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'VERTEX_COLOR': con.add_elem('col', 'short4norm')", "inp.is_linked == False: return if normal_parsed: return normal_parsed = True", "== 'REROUTE': return parse_vector_input(l.from_node.inputs[0]) res_var = write_result(l) st = l.from_socket.type", "'({0} * vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2]) if rotation[2]", "return to_vec3([rgb[0], rgb[1], rgb[2]]) elif node.type == 'VALTORGB': # ColorRamp", "the License. 
# import math import bpy import os import", "'VALUE': res = parse_value(l.from_node, l.from_socket) if res == None: return", "elif socket == node.outputs[4]: # Size particle_info['size'] = True return", "not os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material ' + matname + '/' + image.name", "frag if basecol_only: return if inp.is_linked == False: return if", "= parse_vector_input(node.inputs[3]) scale = parse_value_input(node.inputs[4]) res = 'tex_brick({0} * {4},", "* 3.0;'.format(sample_bump_res, strength)) curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res)) curshader.write('vec3", "blend == 'MIX': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)", "to_linear, tex_link=tex_link)) curshader.write_textures -= 1 return res elif node.image ==", "'anisotropic' tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True elif interpolation ==", "curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) curshader.write_textures -= 1", "if socket == node.outputs[0]: # Index particle_info['index'] = True return", "tex['generate_mipmaps'] = True elif interpolation == 'Closest': tex['min_filter'] = 'point'", "= {1};'.format(res_var, res)) # Normal map already parsed, return elif", "'0.5' elif node.type == 'LAYER_WEIGHT': blend = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked:", "res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale) if sample_bump: write_bump(node, res)", "+ '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') if node.inputs[0].is_linked:", "= node.image != None and node.image.colorspace_settings.name == 'sRGB' res =", "fac)) curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix, fac_inv_var, fac_var)) bc1,", "* vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2]) if rotation[2] !=", "out_col = '(vec3((1.0 - {2}) * {0} + {2} *", "either express or 
implied. # See the License for the", "'PNG' if new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, unpack_filepath, file_format=fmt)", "parse_value_input(node.inputs[1]) val = parse_value_input(node.inputs[2]) fac = parse_value_input(node.inputs[3]) col = parse_vector_input(node.inputs[4])", "vec3(2.2));'.format(tex_store)) return tex_store def write_bump(node, res, scl=0.001): global sample_bump global", "[0,0,0] blackbody_table_r = [ [2.52432244e+03, -1.06185848e-03, 3.11067539e+00], [3.37763626e+03, -4.34581697e-04, 1.64843306e+00],", ": 0.5 / (1.0 - {1})))'.format(dotnv, blend) elif node.type ==", "Normal map already parsed, return elif l.from_node.type == 'NORMAL_MAP': return", "0.5)'.format(rough1, rough2) out_metallic = '({0} * 0.5 + {1} *", "texCoord.y, 0.0)' elif node.type == 'RGB': if node.arm_material_param: nn =", "# Generated - bounds return 'bposition' elif socket == node.outputs[1]:", "texCoord.y, 0.0)' elif socket == node.outputs[3]: # Object return 'mposition'", "== node.outputs[1]: # Normal return 'n' if curshader.shader_type == 'frag'", "== 'frag' else 'wnormal' elif socket == node.outputs[4]: # Incoming", "{1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac_var) elif blend ==", "to_uniform(inp) else: return to_vec3(inp.default_value) def parse_vector(node, socket): global particle_info global", "- Do not use Normal Map node with Armory PBR,", "elif node.type == 'PARTICLE_INFO': if socket == node.outputs[0]: # Index", "parse_displacement_input(l.from_node.inputs[0]) return parse_vector_input(inp) else: return None def parse_vector_input(inp): if inp.is_linked:", "'CAMERA': # View Vector in camera space return 'vVecCam' elif", "'TEX_COORD': #obj = node.object #instance = node.from_instance if socket ==", "= 1.0 - {2};'.format(prefix, fac_inv_var, fac_var)) bc1, rough1, met1, occ1,", "== node.outputs[2]: # UV con.add_elem('tex', 'short2norm') return 'vec3(texCoord.x, 1.0 -", "or location[1] != 0.0 or location[2] != 0.0: out =", "= 
{0};'.format(out_roughness)) frag.write('metallic = {0};'.format(out_metallic)) frag.write('occlusion = {0};'.format(out_occlusion)) frag.write('specular =", "arm.assets.add_embedded_data(path) def mat_name(): return mat_state.material.name def mat_batch(): return mat_state.batch def", "return None elif node.type == 'VECT_TRANSFORM': #type = node.vector_type #conv_from", "return None if do_convert: unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')", "emission_found = True # clearcoar_normal = parse_vector_input(node.inputs[20]) # tangent =", "ior = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else:", "= 4.70366907 rgb[1] = 0.0 rgb[2] = 0.0 else: if", "l.from_node.type == 'REROUTE': return parse_displacement_input(l.from_node.inputs[0]) return parse_vector_input(inp) else: return None", "const for i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i,", "node.type == 'BSDF_PRINCIPLED': if parse_surface: write_normal(node.inputs[19]) out_basecol = parse_vector_input(node.inputs[0]) #", "if hasattr(mat_user.data, 'uv_layers'): lays = mat_user.data.uv_layers # Second uvmap referenced", "elif node.type == 'BSDF_HAIR': pass elif node.type == 'HOLDOUT': if", "interp == 'CONSTANT': return '{0}[{1}]'.format(cols_var, index_var) else: # Linear #", "False rpdat = arm.utils.get_rp() if rpdat.arm_rp_displacement == 'Tessellation' and tese", "else: # Linear # Write facs array facs_var = node_name(node.name)", "True return 'p_location' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif", "node.outputs[1]: # Age particle_info['age'] = True return 'p_age' if arm.utils.get_rp().arm_particles", "fac)) col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) blend = node.blend_type", "= parse_vector_input(node.inputs[1]) curves = node.mapping.curves name = node_name(node.name) # 
mapping.curves[0].points[0].handle_type", "= parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res = 'tex_voronoi({0} *", "node.type == 'ATTRIBUTE': if socket == node.outputs[0]: # Color con.add_elem('col',", "= '({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found = True # clearcoar_normal = parse_vector_input(node.inputs[20]) #", "'0.0' elif node.type == 'CAMERA': # View Z Depth if", "tex_link=None): global sample_bump global sample_bump_res global parsed tex_store = store_var_name(node)", "= parse_value_input(node.inputs[1]) res = 'tex_magic({0} * {1} * 4.0)'.format(co, scale)", "node.type == 'CURVE_VEC': # Vector Curves fac = parse_value_input(node.inputs[0]) vec", "= parse_value_input(node.inputs[1]) res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale) if sample_bump:", "== 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'MIX_SHADER': prefix", "'p_location' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket ==", "= 'abs({0})'.format(val1) elif op == 'MINIMUM': out_val = 'min({0}, {1})'.format(val1,", "assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') curshader.add_function(c_functions.str_tex_noise)", "parse_displacement=True, basecol_only=False): output_node = node_by_type(nodes, 'OUTPUT_MATERIAL') if output_node != None:", "shader - frag for surface / tese for displacement global", "'MIX_SHADER': prefix = '' if node.inputs[0].is_linked else 'const ' fac", "if len(lays) > 1 and node.attribute_name == lays[1].name: con.add_elem('tex1', 'short2norm')", "'': if image.packed_file is not None: filepath = './' +", "= node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT') if output_node == None: return inp =", "if node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV'", "socket == node.outputs[1]: # Normal return 
'n' elif socket ==", "location[2] != 0.0: out = '({0} + vec3({1}, {2}, {3}))'.format(out,", "= 'anisotropic' tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True elif interpolation", "2011-2013 Blender Foundation # # Licensed under the Apache License,", "return parse_shader_input(l.from_node.inputs[0]) return parse_shader(l.from_node, l.from_socket) else: out_basecol = 'vec3(0.8)' out_roughness", "socket) elif node.type == 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type", "else None if tex != None: curshader.write_textures += 1 to_linear", "== 'DIVIDE': out_col = '(vec3((1.0 - {2}) * {0} +", "Temp curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0, 0.0);') curshader.write(f'if (texCoordBlend.x", "{2})'.format(emi1, emi2, fac_var, fac_inv_var) if parse_opacity: out_opacity = '({0} *", "== node.outputs[0]: # Generated - bounds return 'bposition' elif socket", "dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' if socket", "use this file except in compliance with the License. 
#", "rgb[2] = 1.56626022 elif (t < 965.0): rgb[0] = 4.70366907", "/ 150.0)'.format(wl) # Vector elif node.type == 'CAMERA': # View", "rpdat = arm.utils.get_rp() texfilter = rpdat.arm_texture_filter if texfilter == 'Anisotropic':", "# No uvlayers for Curve lays = mat_user.data.uv_layers # Second", "copy non-ascii texture unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if", "for now return 'vcolor' elif node.type == 'ATTRIBUTE': if socket", "{1}, {2})'.format(col1, col2, fac_var) # Revert to mix # out_col", "tex_name, uv_name)) curshader.write('float {0}_4 = textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name,", "'({0} * 0.5 + {1} * 0.5)'.format(spec1, spec2) out_emission =", "for displacement global con global vert global frag global geom", "* {1}))'.format(co, scale) if sample_bump: write_bump(node, res) return res elif", "'SEPRGB': col = parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.r'.format(col)", "co = parse_vector_input(node.inputs[0]) else: co = 'bposition' col1 = parse_vector_input(node.inputs[1])", "'log({0})'.format(val1) elif op == 'SQRT': out_val = 'sqrt({0})'.format(val1) elif op", "if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic", "(vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac_var) elif", "+ vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_2", "'UVMAP': #instance = node.from_instance con.add_elem('tex', 'short2norm') mat = mat_get_material() mat_users", "if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size: with open(unpack_filepath, 'wb')", "num != 0 def is_ascii(s): return len(s) == len(s.encode()) ##", "if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket == node.outputs[5]:", "non-ascii texture else: if not 
os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath):", "else '0.0' elif socket == node.outputs[1]: # Age particle_info['age'] =", "[4.60124770e+03, 2.89727618e-05, 1.48001316e-01], [3.78765709e+03, 9.36026367e-06, 3.98995841e-01] ] blackbody_table_g = [", "node.from_instance con.add_elem('tex', 'short2norm') mat = mat_get_material() mat_users = mat_get_material_users() if", "curshader.add_uniform('float time', link='_time') return 'time' else: return '0.0' elif node.type", "1.0);'.format(tex_store)) return '{0}.a'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked:", "sample_bump: write_bump(node, res) return res elif node.type == 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker)", "'{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link)) else: tex_store = store_var_name(node) #", "nanometers return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl) # Vector elif", "to mix elif blend == 'COLOR': out_col = 'mix({0}, {1},", "'0' for i in range(1, len(points)): index += ' +", "if texfilter == 'Anisotropic': interpolation = 'Smart' elif texfilter ==", "socket == node.outputs[8]: # Ray Depth return '0.0' elif socket", "# else defaults to linear if image_node.extension != 'REPEAT': #", "l.from_node.type == 'REROUTE': return parse_shader_input(l.from_node.inputs[0]) return parse_shader(l.from_node, l.from_socket) else: out_basecol", "else: # VALUE return 'vec3({0})'.format(res_var) else: if inp.type == 'VALUE':", "= '' wrd = bpy.data.worlds['Arm'] # Surface if parse_surface or", "+ 0.66))'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res", "'NORMAL_MAP': return None return res_var def glsl_type(t): if t ==", "'clamp' tex['v_addressing'] = 'clamp' if image.source == 'MOVIE': tex['source'] =", "'0.0' elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if 
node.inputs[0].is_linked: co =", "str(v) def to_vec3(v): return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2]) def", "arm.utils.get_rp().rp_renderer def get_arm_export_tangents(): return bpy.data.worlds['Arm'].arm_export_tangents def safesrc(name): return arm.utils.safesrc(name) def", "i in range(0, len(elems)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position)) #", "== None: return None curshader.write('float {0} = {1};'.format(res_var, res)) #", "# Intercept # Thickness return '0.5' elif node.type == 'LAYER_WEIGHT':", "'0.0' elif socket == node.outputs[9]: # Transparent Depth return '0.0'", "== 'MODULO': # out_val = 'float({0} % {1})'.format(val1, val2) out_val", "= inp.links[0] if l.from_node.type == 'REROUTE': return parse_displacement_input(l.from_node.inputs[0]) return parse_vector_input(inp)", "- 450 to 600 nanometers return 'wavelength_to_rgb(({0} - 450.0) /", "'tex_voronoi({0} * {1}).rgb'.format(co, scale) if sample_bump: write_bump(node, res) return res", "- (vec3(1.0) - {1}) * (vec3(1.0) - {0}))));'.format(col1, col2, fac)", "'' else: res = 'n' return res elif node.type ==", "!= None: curshader.write('n = {0};'.format(normal_res)) def is_parsed(s): global parsed return", "out_metallic = '0.0' out_occlusion = '1.0' out_specular = '1.0' out_opacity", "dotnv) elif socket == node.outputs[1]: # Facing return '(1.0 -", "3.0;'.format(sample_bump_res, strength)) curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res)) curshader.write('vec3 {0}_b", "return 'vcolor' elif node.type == 'ATTRIBUTE': if socket == node.outputs[0]:", "= os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) unpack_filepath", "import arm.utils import arm.make_state import arm.log import arm.material.mat_state as mat_state", "(texCoordBlend.x > 0) {tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;') curshader.write(f'if", "0.0)' elif 
node.type == 'BUMP': # Interpolation strength strength =", "else 'const ' fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name) +", "CELLS res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale) if sample_bump: write_bump(node,", "+ '/' + image.name + ' - invalid file path')", "socket == node.outputs[2]: # Is Diffuse Ray return '1.0' elif", "'0.0' elif node.type == 'BSDF_REFRACTION': # write_normal(node.inputs[3]) pass elif node.type", "= parse_value_input(node.inputs[4]) out_specular = parse_value_input(node.inputs[5]) # specular_tint = parse_vector_input(node.inputs[6]) out_roughness", "curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix, fac_inv_var, fac_var)) bc1, rough1,", "x * cos(theta) - y * sin(theta) # x *", "))'.format(ys_var, index_var, fac_var, facs_var) def write_normal(inp): if inp.is_linked and inp.links[0].from_node.type", "[0.0, 0.0, 0.0, 0.0], [-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02], [-2.22463426e-13, -1.55078698e-08,", "if node.inputs[0].is_linked: uv_name = parse_vector_input(node.inputs[0]) uv_name = 'vec2({0}.x, 1.0 -", "= 'asin({0})'.format(val1) elif op == 'ARCCOSINE': out_val = 'acos({0})'.format(val1) elif", "0.5)'.format(met1, met2) out_occlusion = '({0} * 0.5 + {1} *", "arm.log.warn(matname + '/' + image.name + ' - file extension", "= '1.0' elif node.type == 'EMISSION': if parse_surface: # Multiply", "referenced if len(lays) > 1 and node.attribute_name == lays[1].name: con.add_elem('tex1',", "== 'LINEAR': f = '{0}.x'.format(co) elif grad == 'QUADRATIC': f", "warn(text): arm.log.warn(text) def assets_add(path): arm.assets.add(path) def assets_add_embedded_data(path): arm.assets.add_embedded_data(path) def mat_name():", "not in ('jpg', 'png', 'hdr', 'mp4') # Convert image if", "['1', '2', '3', '4'] else: ext = ['2', '1', '4',", "'tex_brick_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res) return res", "+ node_name(node.name) curshader.add_uniform('vec3 {0}'.format(nn), 
link='{0}'.format(node.name)) return nn else: return to_vec3(socket.default_value)", "+ '_' + s if curshader.write_textures > 0: s +=", "= parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) if", "node_name(node.name) + '_fac_inv' curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac)) curshader.write('{0}float", "to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() +", "= 'vec3(tex_noise({0} * {1}), tex_noise({0} * {1} + 0.33), tex_noise({0}", "arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket == node.outputs[2]: #", "socket): index = socket_index(node, socket) parent = parents.pop() # Leaving", "out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to", "= parse_vector_input(node.inputs[13]) # ior = parse_vector_input(node.inputs[14]) # transmission = parse_vector_input(node.inputs[15])", "'vVec' elif socket == node.outputs[5]: # Parametric return 'mposition' elif", "return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link)) else: global parsed tex_store", "(vec3(1.0) - {0}))));'.format(col1, col2, fac) elif blend == 'LINEAR_LIGHT': out_col", "emission_strength) elif node.type == 'BSDF_GLASS': if parse_surface: write_normal(node.inputs[3]) out_roughness =", "sample_bump_res = '' wrd = bpy.data.worlds['Arm'] # Surface if parse_surface", "== node.outputs[0]: # Is Camera Ray return '1.0' elif socket", "TBN matrix frag.write('vec3 texn = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))", "cycles - 450 to 600 nanometers return 'wavelength_to_rgb(({0} - 450.0)", "# Slow.. 
res = 'vec3(tex_noise({0} * {1}), tex_noise({0} * {1}", "== 'NORMAL_MAP': warn(mat_name() + ' - Do not use Normal", "vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2 = vec2(0.0);') # Temp", "> 0) {tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;') else: if", "if not os.path.exists(unpack_path): os.makedirs(unpack_path) filepath = os.path.join(unpack_path, image.name + \".jpg\")", "bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1]) bc2,", "'DARKEN': out_col = 'min({0}, {1} * {2})'.format(col1, col2, fac_var) elif", "'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket ==", "-2.73059993e-08, 4.24068546e-04, -7.52204323e-01] ] if (t >= 12000): rgb[0] =", "vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) curshader.write_textures -= 1 return '{0}.rgb'.format(tex_store) elif", "node.type == 'OBJECT_INFO': if socket == node.outputs[2]: # Object Index", "'1.0' out_metallic = '1.0' elif node.type == 'VOLUME_ABSORPTION': pass elif", "== node.outputs[1]: return '{0}.g'.format(col) elif socket == node.outputs[2]: return '{0}.b'.format(col)", "# Temp curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0, 0.0);') curshader.write(f'if", "== 'PARTICLE_INFO': if socket == node.outputs[0]: # Index particle_info['index'] =", "node.outputs[3]: # Material Index curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex') return 'objectInfoMaterialIndex' elif", "* 0.5)'.format(met1, met2) out_occlusion = '({0} * 0.5 + {1}", "'BSDF_TOON': # write_normal(node.inputs[3]) pass elif node.type == 'BSDF_TRANSLUCENT': if parse_surface:", "{1})'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type", "== 'NEW_GEOMETRY': if socket == node.outputs[6]: # Backfacing return '(1.0", "node_name(node.name) + '_fac' fac_inv_var = node_name(node.name) + '_fac_inv' curshader.write('{0}float {1}", "# Link image path to assets # TODO: Khamake converts", "'param_' + 
node_name(node.name) curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name)) return nn else: return", "i in range(1, len(elems)): index += ' + ({0} >", "- {0}.y)'.format(uv_name) else: uv_name = 'texCoord' triplanar = node.projection ==", "!= 0.0: # a = node.rotation[0] # out = 'vec3({0}.y", "out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2, fac_var) elif", "0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [-2.02524603e-11,", "* {2})'.format(emi1, emi2, fac_var, fac_inv_var) if parse_opacity: out_opacity = '({0}", "s = safesrc(s) if '__' in s: # Consecutive _", "{1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[0] != 0.0: #", "out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_HAIR': pass", "parse_vector_input(node.inputs[2]) col3 = parse_vector_input(node.inputs[3]) scale = parse_value_input(node.inputs[4]) res = 'tex_brick({0}", "elif socket == node.outputs[9]: # Transparent Depth return '0.0' elif", "* ({1} - vec3(0.5))))'.format(col1, col2, fac_var) if node.use_clamp: return 'clamp({0},", "node.max[0], node.max[1]) return out elif node.type == 'NORMAL': if socket", "i = 0 r = blackbody_table_r[i] g = blackbody_table_g[i] b", "contr = parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr)", "'VOLUME_ABSORPTION': pass elif node.type == 'VOLUME_SCATTER': pass return out_basecol, out_roughness,", "socket == node.outputs[2]: # Lifetime particle_info['lifetime'] = True return 'p_lifetime'", "parse_value_input(node.inputs[0]) out_col = parse_vector_input(node.inputs[1]) return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col,", "global parse_opacity global basecol_only global emission_found global particle_info global sample_bump", "normal_parsed = True frag.write_normal += 1 if not get_arm_export_tangents() or", "{1})'.format(val1, val2) elif op == 'MAXIMUM': out_val = 
'max({0}, {1})'.format(val1,", "node_name(node.name) # mapping.curves[0].points[0].handle_type # bezier curve return '(vec3({0}, {1}, {2})", "* {2}, {0}.x * {2} + ({0}.y) * {1}, 0.0)'.format(out,", "({1}) * 3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength)) curshader.write('vec3", "vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat, val, fac) elif node.type", "'bposition' scale = parse_value_input(node.inputs[3]) res = 'tex_checker_f({0}, {1})'.format(co, scale) if", "# if node.rotation[0] != 0.0: # a = node.rotation[0] #", "texCoord1.y, 0.0)' return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif node.type", "emission_found = True if parse_opacity: out_opacity = parse_value_input(node.inputs[1]) else: return", "else: return to_vec3(socket.default_value) elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked:", "parse_vector_input(node.inputs[0]) elif node.type == 'BSDF_TOON': # write_normal(node.inputs[3]) pass elif node.type", "ColorRamp fac = parse_value_input(node.inputs[0]) interp = node.color_ramp.interpolation elems = node.color_ramp.elements", "* sin(theta) + y * cos(theta) out = 'vec3({0}.x *", "col2, fac_var) if node.use_clamp: return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col) else: return", "for p in parents: s = p.name + '_' +", "len(node.inputs) > 20: out_opacity = parse_value_input(node.inputs[18]) elif node.type == 'BSDF_DIFFUSE':", "elif node.type == 'TANGENT': return 'wtangent' elif node.type == 'TEX_COORD':", "Entering group index = socket_index(node, socket) output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT')", "elif node.type == 'WIREFRAME': #node.use_pixel_size # size = parse_value_input(node.inputs[0]) return", "curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition'", "0.0 or location[2] != 0.0: out = '({0} + vec3({1},", "vec + '.y', curves[3].points), 
vector_curve(name + '3c', vec + '.z',", "== node.outputs[5]: # Is Reflection Ray return '0.0' elif socket", "non-ascii texture unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not", "Camera Ray return '1.0' elif socket == node.outputs[1]: # Is", "{3}))'.format(out, node.min[0], node.min[1]) # if node.use_max: # out = 'min({0},", "invalid file path') return None # Reference image name texpath", "to noise curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co", "mat_get_material().arm_decal: # Compute TBN matrix frag.write('vec3 texn = ({0}) *", "elif grad == 'QUADRATIC_SPHERE': f = '0.0' elif grad ==", "not exist yet if image.packed_file is not None: if not", "mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): # No uvlayers for", "tex['file'] = tex['file'].rsplit('.', 1)[0] + '.' + new_ext if image.packed_file", "'TEX_MUSGRAVE': # Fall back to noise curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co", "# Transmission Depth return '0.0' elif node.type == 'OBJECT_INFO': if", "res = 'tex_magic({0} * {1} * 4.0)'.format(co, scale) if sample_bump:", "socket_index(node, socket) output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT') if output_node == None:", "{0}'.format(nn), link='{0}'.format(node.name)) return nn else: return to_vec3(socket.default_value) elif node.type ==", "= ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) frag.write('texn.y = -texn.y;') frag.add_include('std/normals.glsl')", "c_functions import shutil emission_found = False particle_info = None #", "op == 'COSINE': out_val = 'cos({0})'.format(val1) elif op == 'TANGENT':", "Occlusion out_occlusion = parse_value_input(node.inputs[2]) # Roughness out_roughness = parse_value_input(node.inputs[3]) #", "{2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_3 = textureOffset({1}, {2}.xy,", 
"socket == node.outputs[2]: return '{0}.z'.format(vec) elif node.type == 'VECT_MATH': vec1", "cols_var = node_name(node.name) + '_cols' curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO:", "ior = parse_vector_input(node.inputs[14]) # transmission = parse_vector_input(node.inputs[15]) # transmission_roughness =", "node.type == 'UVMAP': #instance = node.from_instance con.add_elem('tex', 'short2norm') mat =", "rgb[0] = r[0] * t_inv + r[1] * t +", "global curshader # Active shader - frag for surface /", "2)).r;'.format(tex_store, tex_name, uv_name)) sample_bump = False if to_linear: curshader.write('{0}.rgb =", "cameraProj', link='_cameraPlaneProj') return 'linearize(gl_FragCoord.z, cameraProj)' # View Distance else: curshader.add_uniform('vec3", "\"JPEG\") else: arm.log.warn(matname + '/' + image.name + ' -", "return to_vec3(node.outputs[0].default_value) elif socket == node.outputs[1]: # TODO: is parse_value", "fac_var)) bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1])", "else: if not os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material ' + matname + '/'", "= True curshader.write_textures += 1 curshader.write('vec4 {0} = vec4(1.0, 0.0,", "return '0.0' elif socket == node.outputs[6]: # Is Transmission Ray", "= 'vec3({0}.y * {1} - {0}.z * {2}, {0}.y *", "return 'objectInfoMaterialIndex' elif socket == node.outputs[4]: # Random curshader.add_uniform('float objectInfoRandom',", "node.type == 'AMBIENT_OCCLUSION': if parse_surface: # Single channel out_occlusion =", "0.0, 0.0], [-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02], [-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01],", "os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath): shutil.copy(texpath, unpack_filepath) arm.assets.add(unpack_filepath) else: if", "basecol_only = _basecol_only emission_found = False particle_info = {} particle_info['index']", 
"'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl) # Vector elif node.type ==", "elif blend == 'LIGHTEN': out_col = 'max({0}, {1} * {2})'.format(col1,", "node.outputs[1]: return '{0}.y'.format(vec) elif socket == node.outputs[2]: return '{0}.z'.format(vec) elif", "out_emission = '0.0' if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'):", "curshader.add_uniform('sampler2D snoise256', link='$noise256.png') curshader.add_function(c_functions.str_tex_noise) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else:", "col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) col3 = parse_vector_input(node.inputs[3]) scale", "detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) # Slow.. res", "1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var) elif node.type ==", "+ {0}.z * {0}.z), 0.0)'.format(co) res = '(clamp({0}, 0.0, 1.0))'.format(f)", "Singular Ray return '0.0' elif socket == node.outputs[5]: # Is", "rotation[2] != 0.0: # ZYX rotation, Z axis for now..", "col2, fac_var) elif blend == 'OVERLAY': out_col = 'mix({0}, {1},", "= float(parse_value_input(node.inputs[0])) rgb = [0,0,0] blackbody_table_r = [ [2.52432244e+03, -1.06185848e-03,", "'_facs' curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const for i", "elif socket == node.outputs[5]: # Velocity particle_info['velocity'] = True return", "'({0} * {1})'.format(out_basecol, emission_strength) elif node.type == 'BSDF_GLASS': if parse_surface:", "* {1}).a)'.format(co, scale) else: # CELLS res = 'tex_voronoi({0} *", "= parse_value_input(node.inputs[1]) b = parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(r, g,", "limitations under the License. 
# import math import bpy import", "'(vec3({0}) * {1})'.format(height, scale) def parse_normal_map_color_input(inp, strength_input=None): global normal_parsed global", "or mat_get_material().arm_decal: # Compute TBN matrix frag.write('vec3 texn = ({0})", "on size request, cache size instead powimage = is_pow(image.size[0]) and", "'param_' + node_name(node.name) curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name)) return nn else: return", "v[1], v[2]) def node_by_type(nodes, ntype): for n in nodes: if", "node.type == 'TEX_POINTDENSITY': # Pass through return to_vec3([0.0, 0.0, 0.0])", "node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0, 0.0, 0.0] if scale[0] !=", "and inp.is_uniform: return to_uniform(inp) else: return to_vec1(inp.default_value) def parse_value(node, socket):", "'float({0} > {1})'.format(val1, val2) elif op == 'ROUND': # out_val", "# detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res =", "False particle_info['lifetime'] = False particle_info['location'] = False particle_info['size'] = False", "= parse_value_input(node.inputs[1]) scale = parse_value_input(node.inputs[2]) nor = parse_vector_input(node.inputs[3]) return '(vec3({0})", "write_bump(node, res) return res elif node.type == 'BRIGHTCONTRAST': out_col =", "{2})'.format(col1, col2, fac_var) elif blend == 'DARKEN': out_col = 'min({0},", "+ {1} * {2})'.format(emi1, emi2, fac_var, fac_inv_var) if parse_opacity: out_opacity", "'bposition' scale = parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) # distortion", "= 'Smart' elif texfilter == 'Linear': interpolation = 'Linear' elif", "= node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type # bezier curve", "= node_name(node.name) + '_cols' curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make", "fmt = 'PNG' if new_ext == 'png' else 'JPEG' 
arm.utils.convert_image(image,", "distance = parse_value_input(node.inputs[1]) sample_bump = True height = parse_value_input(node.inputs[2]) sample_bump", "= parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'tex_musgrave_f({0} *", "val2) elif op == 'GREATER_THAN': out_val = 'float({0} > {1})'.format(val1,", "_basecol_only): global parsed # Compute nodes only once global parents", "== 'GREATER_THAN': out_val = 'float({0} > {1})'.format(val1, val2) elif op", "* 2.0 - 1.0;'.format(parse_vector_input(inp))) if strength_input != None: strength =", "== 'png' else 'JPEG' arm.utils.convert_image(image, unpack_filepath, file_format=fmt) else: # Write", "= res_var_name(l.from_node, l.from_socket) # Unparsed node if not is_parsed(res_var): parsed[res_var]", "return parse_vector_input(l.from_node.inputs[0]) res_var = write_result(l) st = l.from_socket.type if st", "sample_bump global sample_bump_res sample_bump_res = store_var_name(node) + '_bump' # Testing..", "= 'ceil({0})'.format(val1) elif op == 'FRACT': out_val = 'fract({0})'.format(val1) elif", "tex, tex_name, to_linear, tex_link=tex_link)) curshader.write_textures -= 1 return res elif", "({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) frag.write('texn.y = -texn.y;') frag.add_include('std/normals.glsl') frag.write('mat3", "s[1].lower() do_convert = ext not in ('jpg', 'png', 'hdr', 'mp4')", "res elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "node.outputs[0]: # Generated - bounds return 'bposition' elif socket ==", "= '({0} * {1})'.format(val1, val2) elif op == 'DIVIDE': out_val", "0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store,", "node.invert: ext = ['1', '2', '3', '4'] else: ext =", "== 'MATH': val1 = parse_value_input(node.inputs[0]) val2 = parse_value_input(node.inputs[1]) op =", "== 'GROUP': if 
node.node_tree.name.startswith('Armory PBR'): # Displacement if socket ==", "= parse_value_input(node.inputs[2]) # Roughness out_roughness = parse_value_input(node.inputs[3]) # Metallic out_metallic", "= '({0} * vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2]) if", "return 'float' def to_uniform(inp): uname = safesrc(inp.node.name) + safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type)", "Constant, linear, quadratic # Shaders default to quadratic for now", "+ '.y', curves[3].points), vector_curve(name + '3c', vec + '.z', curves[3].points))", "'0.0' elif socket == node.outputs[10]: # Transmission Depth return '0.0'", "0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],", "20: out_opacity = parse_value_input(node.inputs[18]) elif node.type == 'BSDF_DIFFUSE': if parse_surface:", "'vec3({0}, {1}, {2})'.format(x, y, z) elif node.type == 'VECT_MATH': vec1", "Shadow Ray return '0.0' elif socket == node.outputs[2]: # Is", "node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT') if output_node == None: return inp = output_node.inputs[index]", "* {1}).a'.format(co, scale) else: # CELLS res = 'tex_voronoi({0} *", "return to_vec3([0.0, 0.0, 0.0]) else: if mat_batch() and inp.is_uniform: return", "'DOT_PRODUCT': return 'vec3(dot({0}, {1}))'.format(vec1, vec2) elif op == 'CROSS_PRODUCT': return", "+ 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} +", "matname + '/' + image.name + ' - file not", "co, post, scl)) sample_bump = False def to_vec1(v): return str(v)", "= parse_value_input(node.inputs[5]) # specular_tint = parse_vector_input(node.inputs[6]) out_roughness = parse_value_input(node.inputs[7]) #", "== node.outputs[1]: # TODO: is parse_value path preferred? 
nor =", "= 'vec3(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res) return res", "node.outputs[1]: return parse_value_input(node.inputs[7]) else: return None else: return parse_group(node, socket)", "node.outputs[2]: # Lifetime particle_info['lifetime'] = True return 'p_lifetime' if arm.utils.get_rp().arm_particles", "# You may obtain a copy of the License at", "{2}, {0}.x * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a),", "== 'AVERAGE': return '(({0} + {1}) / 2.0)'.format(vec1, vec2) elif", "curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store)) return tex_store def write_bump(node, res, scl=0.001):", "fac_inv_var) if parse_opacity: out_opacity = '({0} * {3} + {1}", "parse_value_input(node.inputs[2]) fac = parse_value_input(node.inputs[3]) col = parse_vector_input(node.inputs[4]) return 'hue_sat({0}, vec4({1}-0.5,", "= _tesc tese = _tese parse_surface = _parse_surface parse_opacity =", "*= ({1}) * 3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength))", "textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_3 = textureOffset({1},", "scale = parse_value_input(node.inputs[4]) res = 'tex_brick_f({0} * {1})'.format(co, scale) if", "def get_rp_renderer(): return arm.utils.get_rp().rp_renderer def get_arm_export_tangents(): return bpy.data.worlds['Arm'].arm_export_tangents def safesrc(name):", "= '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link)) curshader.write_textures -= 1 return res", "changes if not os.path.isfile(converted_path): fmt = 'PNG' if new_ext ==", "socket == node.outputs[6]: # Angular Velocity particle_info['angular_velocity'] = True return", "- {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_TRANSPARENT': if parse_opacity: out_opacity =", "only once global parents global normal_parsed global curshader # Active", "== 'On' else 'vec3(0.0)' elif socket == node.outputs[6]: # Angular", "{1} - {0}.z * {2}, 
{0}.x * {2} + {0}.z", "elif op == 'FRACT': out_val = 'fract({0})'.format(val1) elif op ==", "'BSDF_GLOSSY': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1])", "] blackbody_table_b = [ [0.0, 0.0, 0.0, 0.0], [0.0, 0.0,", "textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_4 = textureOffset({1},", "node.outputs[8]: # Ray Depth return '0.0' elif socket == node.outputs[9]:", "parse_vector_input(node.inputs[20]) # tangent = parse_vector_input(node.inputs[21]) if parse_opacity: if len(node.inputs) >", "return 'vec3' else: return 'float' def to_uniform(inp): uname = safesrc(inp.node.name)", "'round({0})'.format(val1) out_val = 'floor({0} + 0.5)'.format(val1) elif op == 'FLOOR':", "node.operation if op == 'DOT_PRODUCT': return 'dot({0}, {1})'.format(vec1, vec2) else:", "# ColorRamp fac = parse_value_input(node.inputs[0]) interp = node.color_ramp.interpolation elems =", "== 'SUBTRACT': out_col = 'mix({0}, {0} - {1}, {2})'.format(col1, col2,", "= False curshader = frag out_basecol, out_roughness, out_metallic, out_occlusion, out_specular,", "tex != None: curshader.write_textures += 1 to_linear = node.image !=", "Metallic out_metallic = parse_value_input(node.inputs[4]) # Normal if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type", "out = 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1]) return out", "particle_info['angular_velocity'] = True return 'vec3(0.0)' elif node.type == 'TANGENT': return", "col = parse_vector_input(node.inputs[0]) return '((({0}.r * 0.3 + {0}.g *", "'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) converted_path = os.path.join(unpack_path, tex['file']) #", "True return '1.0' elif node.type == 'VALUE': if node.arm_material_param: nn", "+= texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;') curshader.write(f'if (texCoordBlend.z > 0) 
{tex_store}", "(texCoordBlend.z > 0) {tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;') else:", "'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac_var) elif blend ==", "uvmap referenced if len(lays) > 1 and node.attribute_name == lays[1].name:", "tex_name, tex_link=tex_link)) curshader.write_textures -= 1 return res elif node.image ==", "= parse_value_input(node.inputs[1]) contr = parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0}, {1}, {2})'.format(out_col,", "Return to group return res def parse_input(inp): if inp.type ==", "spec2, opac2, emi2 = parse_shader_input(node.inputs[2]) if parse_surface: out_basecol = '({0}", "res = 'tex_wave_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res)", "tex['mipmap_filter'] = 'no' tex['generate_mipmaps'] = False return tex def is_pow(num):", "to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_SKY': # Pass through", "out = 'vec3({0}.x * {1} - ({0}.y) * {2}, {0}.x", "- start)) return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}])", "index)) if interp == 'CONSTANT': return '{0}[{1}]'.format(cols_var, index_var) else: #", "if not os.path.isfile(converted_path): fmt = 'PNG' if new_ext == 'png'", "sample_bump = False if to_linear: curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store)) return", "vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y',", "blackbody_table_b = [ [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0,", "s = tex['file'].rsplit('.', 1) if len(s) == 1: arm.log.warn(matname +", "elif blend == 'DODGE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2,", "path') return None # Reference image name texpath = arm.utils.asset_path(filepath)", "node.outputs[2]: # UV con.add_elem('tex', 'short2norm') return 'vec3(texCoord.x, 1.0 - texCoord.y,", "def to_vec1(v): return str(v) def to_vec3(v): return 'vec3({0}, {1}, {2})'.format(v[0],", "tangent = parse_vector_input(node.inputs[21]) if parse_opacity: if 
len(node.inputs) > 20: out_opacity", "elif node.type == 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "particle_info['index'] = False particle_info['age'] = False particle_info['lifetime'] = False particle_info['location']", "+ '2', vec + '.z', curves[2].points), fac,\\ vector_curve(name + '3a',", "rough2) out_metallic = '({0} * 0.5 + {1} * 0.5)'.format(met1,", "2.0 (the \"License\"); # you may not use this file", "# Return to group return res def parse_input(inp): if inp.type", "global sample_bump global sample_bump_res # RGB if node.type == 'GROUP':", "(t < 965.0): rgb[0] = 4.70366907 rgb[1] = 0.0 rgb[2]", "= parse_value_input(node.inputs[3]) # Slow.. res = 'vec3(tex_noise({0} * {1}), tex_noise({0}", "socket == node.outputs[1]: return '{0}.g'.format(col) elif socket == node.outputs[2]: return", "= frag out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission =", "vec2(0.0);') # Temp curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0, 0.0);')", "{1}, {2}, {3})'.format(co, col1, col2, col3, scale) if sample_bump: write_bump(node,", "= parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_specular = '0.0' elif node.type", "== 'LINEAR_LIGHT': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) #", "= '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_TRANSPARENT': if parse_opacity:", "'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat, val, fac) elif", "{1} * {2})'.format(met1, met2, fac_var, fac_inv_var) out_occlusion = '({0} *", "!= 0.0: out = '({0} + vec3({1}, {2}, {3}))'.format(out, location[0],", "== \"GENERATED\": unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not", "parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) # detail", "till drivers are 
implemented if node.attribute_name == 'time': curshader.add_uniform('float time',", "fac_inv_var) out_roughness = '({0} * {3} + {1} * {2})'.format(rough1,", "not os.path.exists(unpack_path): os.makedirs(unpack_path) converted_path = os.path.join(unpack_path, tex['file']) # TODO: delete", "# Linear # Write facs array facs_var = node_name(node.name) +", "2.0 * {1} : 0.5 / (1.0 - {1})))'.format(dotnv, blend)", "# Already fetched if is_parsed(store_var_name(node)): return '{0}.rgb'.format(store_var_name(node)) tex_name = node_name(node.name)", "= './' + image.name has_ext = filepath.endswith(('.jpg', '.png', '.hdr')) if", "not os.path.exists(unpack_path): os.makedirs(unpack_path) unpack_filepath = os.path.join(unpack_path, tex['file']) if do_convert: if", "pre, co, post, scl)) curshader.write('float {0}_2 = {1}{2} + vec3({4},", "t + g[2] rgb[2] = ((b[0] * t + b[1])", "# Is Camera Ray return '1.0' elif socket == node.outputs[1]:", "'HOLDOUT': if parse_surface: # Occlude out_occlusion = '0.0' elif node.type", "image_node.image if matname is None: matname = mat_state.material.name if image", "'REROUTE': return parse_shader_input(l.from_node.inputs[0]) return parse_shader(l.from_node, l.from_socket) else: out_basecol = 'vec3(0.8)'", "= parse_value_input(node.inputs[0]) sat = parse_value_input(node.inputs[1]) val = parse_value_input(node.inputs[2]) fac =", "'CROSS_PRODUCT': return 'cross({0}, {1})'.format(vec1, vec2) elif op == 'NORMALIZE': return", "False curshader = frag out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity,", "= vert out_disp = parse_displacement_input(node.inputs[2]) curshader.write('vec3 disp = {0};'.format(out_disp)) def", "val, fac) elif node.type == 'INVERT': fac = parse_value_input(node.inputs[0]) out_col", "= 'PNG' if new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, unpack_filepath,", "= 'point' tex['mag_filter'] = 'point' # else defaults to linear", "= '0.0' elif grad == 'EASING': f = '0.0' elif", 
"tex_name, uv_name)) curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name,", "elif op == 'MINIMUM': out_val = 'min({0}, {1})'.format(val1, val2) elif", "+ {0} * (vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0)", "1.0 or scale[1] != 1.0 or scale[2] != 1.0: out", "node.type == 'LAYER_WEIGHT': blend = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv =", "'0.0' return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def", "elif node.type == 'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "else: arm.log.warn(matname + '/' + image.name + ' - invalid", "= parse_value_input(node.inputs[3]) res = 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale)", "False return tex def is_pow(num): return ((num & (num -", "# Write Xs array facs_var = name + '_xs' curshader.write('float", "View Vector in camera space return 'vVecCam' elif node.type ==", "== None: return None curshader.write('vec3 {0} = {1};'.format(res_var, res)) elif", "'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_wave_f({0} * {1})'.format(co, scale)", "link='_objectInfoIndex') return 'objectInfoIndex' elif socket == node.outputs[3]: # Material Index", "+ ' - invalid file path') return None # Reference", "= os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) converted_path", "* {1})'.format(height, scale) def parse_normal_map_color_input(inp, strength_input=None): global normal_parsed global frag", "parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' curshader.write('float {0} = {1};'.format(fac_var,", "height = parse_value_input(node.inputs[0]) midlevel = parse_value_input(node.inputs[1]) scale = parse_value_input(node.inputs[2]) nor", "arm.make_state import arm.log import 
arm.material.mat_state as mat_state import arm.material.cycles_functions as", "blend = node.blend_type if blend == 'MIX': out_col = 'mix({0},", "elif op == 'CEIL': out_val = 'ceil({0})'.format(val1) elif op ==", "-2)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_4 = textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store,", "parse_opacity: frag.write('opacity = {0} - 0.0002;'.format(out_opacity)) # Volume # parse_volume_input(node.inputs[1])", "interp = node.color_ramp.interpolation elems = node.color_ramp.elements if len(elems) == 1:", "parse_vector_input(l.from_node.inputs[0]) res_var = write_result(l) st = l.from_socket.type if st ==", "texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name)) if sample_bump: sample_bump_res = tex_store curshader.write('float", "!= 'GROUP_INPUT': normal_res = parse_vector_input(inp) if normal_res != None: curshader.write('n", "'PNG' if new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, converted_path, file_format=fmt)", "vec2 {uv_name}2 = vec2(0.0);') # Temp curshader.write(f'vec4 {tex_store} = vec4(0.0,", "= safesrc(s) if '__' in s: # Consecutive _ are", "index fac_var = name + '_fac' curshader.write('float {0} = {1};'.format(fac_var,", "See the License for the specific language governing permissions and", "= filepath.rsplit('.', 1) arm.assets.add(arm.utils.asset_path(s[0] + '.' + s[1].lower())) else: arm.assets.add(arm.utils.asset_path(filepath))", "texfilter == 'Linear': interpolation = 'Linear' elif texfilter == 'Point':", "' + ({0} > {1} ? 
1 : 0)'.format(fac_var, points[i].location[0])", "to mix elif blend == 'VALUE': out_col = 'mix({0}, {1},", "to in writing, software # distributed under the License is", "{1}).a'.format(co, scale) else: # CELLS res = 'tex_voronoi({0} * {1}).r'.format(co,", "write_bump(node, res) return res elif node.type == 'LIGHT_FALLOFF': # Constant,", "parse_output(node, _con, _vert, _frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement,", "write_bump(node, res) return res elif node.type == 'TEX_IMAGE': # Already", "node.type == 'CAMERA': # View Z Depth if socket ==", "'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co =", "{1}), {2})'.format(col1, col2, fac_var) elif blend == 'DARKEN': out_col =", "{1}))'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type", "{1}, {2})'.format(col1, col2, fac_var) elif blend == 'ADD': out_col =", "parse_input(inp): if inp.type == 'SHADER': return parse_shader_input(inp) elif inp.type ==", "else 'wnormal' elif socket == node.outputs[2]: # Tangent return 'wtangent'", "= {} parents = [] normal_parsed = False rpdat =", "curshader.add_function(c_functions.str_hue_sat) hue = parse_value_input(node.inputs[0]) sat = parse_value_input(node.inputs[1]) val = parse_value_input(node.inputs[2])", "sample_bump if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): # Displacement", "* cos(theta) - y * sin(theta) # x * sin(theta)", "elif node.type == 'RGB': if node.arm_material_param: nn = 'param_' +", "detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'vec3(tex_musgrave_f({0}", "{1}, {2})'.format(col1, col2, fac_var) elif blend == 'SCREEN': out_col =", "= False particle_info = None # Particle info export def", "'/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') 
if node.inputs[0].is_linked: co", "compliance with the License. # You may obtain a copy", "== 'BSDF_TRANSPARENT': if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif", "# CELLS res = 'tex_voronoi({0} * {1}).r'.format(co, scale) if sample_bump:", "s = p.name + '_' + s if curshader.write_textures >", "(vec3(1.0) - {0}))'.format(col1, col2, fac_var) elif blend == 'DIVIDE': out_col", "{2}) * {0} + {2} * ((vec3(1.0) - {0}) *", "2.85655028e-05, 1.29075375e-01], [4.60124770e+03, 2.89727618e-05, 1.48001316e-01], [3.78765709e+03, 9.36026367e-06, 3.98995841e-01] ] blackbody_table_g", "== 'DODGE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) #", "# Thickness return '0.5' elif node.type == 'LAYER_WEIGHT': blend =", "'max({0}, {1})'.format(val1, val2) elif op == 'LESS_THAN': out_val = 'float({0}", "# transmission = parse_vector_input(node.inputs[15]) # transmission_roughness = parse_vector_input(node.inputs[16]) if node.inputs[17].is_linked", "= parse_value_input(node.inputs[1]) val = parse_value_input(node.inputs[2]) fac = parse_value_input(node.inputs[3]) col =", "elif node.type == 'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)): return", "op == 'ARCTANGENT': out_val = 'atan({0})'.format(val1) elif op == 'ARCTAN2':", "))'.format(cols_var, index_var, fac_var, facs_var) elif node.type == 'CURVE_VEC': # Vector", "return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl) # Vector elif node.type", "inp = output_node.inputs[index] parents.append(node) out_group = parse_input(inp) parents.pop() return out_group", "else: co = 'bposition' scale = parse_value_input(node.inputs[1]) # detail =", "uvmap referenced if len(lays) > 1 and node.uv_map == lays[1].name:", "= parse_vector_input(node.inputs[4]) return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat,", "if socket == node.outputs[0]: return '{0}.x'.format(vec) elif socket == node.outputs[1]:", "'vec3(0.8)' out_roughness = '0.0' 
out_metallic = '0.0' out_occlusion = '1.0'", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "bright = parse_value_input(node.inputs[1]) contr = parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0}, {1},", "elems[i].position) # Write index index_var = node_name(node.name) + '_i' curshader.write('int", "elif blend == 'DIVIDE': out_col = '(vec3((1.0 - {2}) *", "else: co = 'bposition' scale = parse_value_input(node.inputs[1]) if node.coloring ==", "= parse_vector_input(node.inputs[21]) if parse_opacity: if len(node.inputs) > 20: out_opacity =", "{0}), {1})'.format(blend, dotnv) elif socket == node.outputs[1]: # Facing return", "Revert to glossy out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic", "True # clearcoar_normal = parse_vector_input(node.inputs[20]) # tangent = parse_vector_input(node.inputs[21]) if", "Camera return 'vec3(0.0)' # 'vposition' elif socket == node.outputs[5]: #", "if node.inputs['Location'].enabled else [0.0, 0.0, 0.0] if scale[0] != 1.0", "blend) elif node.type == 'LIGHT_PATH': if socket == node.outputs[0]: #", "vec2 = parse_vector_input(node.inputs[1]) op = node.operation if op == 'DOT_PRODUCT':", "== node.outputs[5]: # Velocity particle_info['velocity'] = True return 'p_velocity' if", "curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name)) if sample_bump: sample_bump_res", "disp_enabled(): return arm.utils.disp_enabled(arm.make_state.target) def warn(text): arm.log.warn(text) def assets_add(path): arm.assets.add(path) def", "(texCoordBlend.y > 0) {tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;') curshader.write(f'if", "elif node.type == 'RGBTOBW': col = parse_vector_input(node.inputs[0]) return '((({0}.r *", "node.type == 'SEPHSV': return '0.0' elif node.type == 'SEPRGB': col", "'SUBTRACT': out_col = 'mix({0}, {0} - {1}, {2})'.format(col1, col2, 
fac_var)", "or file does not exist yet if image.packed_file is not", "= parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[4]) res", "== node.outputs[1]: # Is Shadow Ray return '0.0' elif socket", "1.0 - {2};'.format(prefix, fac_inv_var, fac_var)) bc1, rough1, met1, occ1, spec1,", "parse_vector_input(node.inputs[2]) scale = parse_value_input(node.inputs[3]) res = 'tex_checker({0}, {1}, {2}, {3})'.format(co,", "elif op == 'MAXIMUM': out_val = 'max({0}, {1})'.format(val1, val2) elif", "= 1.56626022 elif (t < 965.0): rgb[0] = 4.70366907 rgb[1]", "parse_value_input(node.inputs[18]) elif node.type == 'BSDF_DIFFUSE': if parse_surface: write_normal(node.inputs[2]) out_basecol =", "if scale[0] != 1.0 or scale[1] != 1.0 or scale[2]", "node.max[1]) return out elif node.type == 'NORMAL': if socket ==", "mat_batch(): return mat_state.batch def mat_bind_texture(tex): mat_state.bind_textures.append(tex) def mat_texture_grad(): return mat_state.texture_grad", "{1} - ({0}.y) * {2}, {0}.x * {2} + ({0}.y)", "({1}) * 3.0;'.format(sample_bump_res, strength)) curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res))", "'no' tex['generate_mipmaps'] = False return tex def is_pow(num): return ((num", "res, scl=0.001): global sample_bump global sample_bump_res sample_bump_res = store_var_name(node) +", "else 'vec3(0.0)' elif socket == node.outputs[5]: # Velocity particle_info['velocity'] =", "make_texture(image_node, tex_name, matname=None): tex = {} tex['name'] = tex_name image", "{0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[0] !=", "node.type == 'WIREFRAME': #node.use_pixel_size # size = parse_value_input(node.inputs[0]) return '0.0'", "curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png')", "+ '.z', 
curves[2].points), fac,\\ vector_curve(name + '3a', vec + '.x',", "+ '3c', vec + '.z', curves[3].points)) elif node.type == 'COMBHSV':", "uname) return uname def store_var_name(node): return node_name(node.name) + '_store' def", "== ntype: return n def socket_index(node, socket): for i in", "out_specular = '({0} * 0.5 + {1} * 0.5)'.format(spec1, spec2)", "elif socket == node.outputs[4]: # Random curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom') return", "!= os.path.getsize(texpath): shutil.copy(texpath, unpack_filepath) arm.assets.add(unpack_filepath) else: if not os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material", "if l.from_node.type == 'REROUTE': return parse_value_input(l.from_node.inputs[0]) res_var = write_result(l) st", "interpolation == 'Smart': # Mipmap anisotropic tex['min_filter'] = 'anisotropic' tex['mipmap_filter']", "Transmission Depth return '0.0' elif node.type == 'OBJECT_INFO': if socket", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "socket == node.outputs[0]: return to_vec3(node.outputs[0].default_value) elif socket == node.outputs[1]: #", "ext[3])) curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2 *= ({1}) *", "anisotropic tex['min_filter'] = 'anisotropic' tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True", "socket == node.outputs[0]: # Is Camera Ray return '1.0' elif", "'float({0} < {1})'.format(val1, val2) elif op == 'GREATER_THAN': out_val =", "= '({0} * 0.5 + {1} * 0.5)'.format(emi1, emi2) if", "{2}, {3})'.format(co, col1, col2, col3, scale) if sample_bump: write_bump(node, res)", "if n.type == ntype: return n def socket_index(node, socket): for", "rgb[2]]) elif node.type == 'VALTORGB': # ColorRamp fac = parse_value_input(node.inputs[0])", "parse_vector_input(node.inputs[0]) vec2 = parse_vector_input(node.inputs[1]) op = node.operation if op ==", "in ('jpg', 'png', 'hdr', 'mp4') # Convert image if do_convert:", "pre, co, post, scl)) sample_bump = False def to_vec1(v): 
return", "curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float", "== 'BSDF_PRINCIPLED': if parse_surface: write_normal(node.inputs[19]) out_basecol = parse_vector_input(node.inputs[0]) # subsurface", "* {0} / {1}))'.format(col1, col2, fac_var) elif blend == 'DIFFERENCE':", "return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link)) else: tex_store = store_var_name(node)", "return '1.0' elif socket == node.outputs[3]: # Is Glossy Ray", "None: strength = parse_value_input(strength_input) if strength != '1.0': frag.write('n.xy *=", "curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res)) res = 'normalize(mat3({0}_a, {0}_b,", "'/' + image.name + ' - file extension required for", "{0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) curshader.write_textures -= 1 return", "full images on size request, cache size instead powimage =", "sample_bump: write_bump(node, res, 0.1) return res elif node.type == 'TEX_MUSGRAVE':", "'NEW_GEOMETRY': if socket == node.outputs[0]: # Position return 'wposition' elif", "3.15679613e-04, 4.73464526e-01], [-1.00402363e+03, 1.29189794e-04, 9.08181524e-01], [-1.22075471e+03, 2.56245413e-05, 1.20753416e+00], [-1.42546105e+03, -4.01730887e-05,", "vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2',", "elif op == 'MULTIPLY': out_val = '({0} * {1})'.format(val1, val2)", "elif node.type == 'VALUE': if node.arm_material_param: nn = 'param_' +", "if curshader.write_textures > 0: s += '_texread' s = safesrc(s)", "parse_vector_input(node.inputs[0]) out_emission = '1.0' emission_found = True emission_strength = parse_value_input(node.inputs[1])", "out_opacity = '1.0' out_emission = '0.0' if node.type == 'GROUP':", "== 'UVMAP': #instance = node.from_instance con.add_elem('tex', 'short2norm') mat = mat_get_material()", "elif socket == node.outputs[7]: # Pointiness return '0.0' elif 
node.type", "if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket == node.outputs[4]:", "{0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const for i in range(0,", "= g[0] * t_inv + g[1] * t + g[2]", "'': if node.invert: ext = ['1', '2', '3', '4'] else:", "index = '0' for i in range(1, len(elems)): index +=", "elif node.type == 'CAMERA': # View Vector in camera space", "= {1};'.format(res_var, res)) elif st == 'VALUE': res = parse_value(l.from_node,", "0.826270103 rgb[1] = 0.994478524 rgb[2] = 1.56626022 elif (t <", "col2, fac_var) elif blend == 'ADD': out_col = 'mix({0}, {0}", "* {1} - ({0}.y) * {2}, {0}.x * {2} +", "= 'tex_noise({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res, 0.1)", "out_val = 'atan({0})'.format(val1) elif op == 'ARCTAN2': out_val = 'atan({0},", "frag global geom global tesc global tese global parse_surface global", "elif(t >= 1449.0): i = 2 elif(t >= 1167.0): i", "wrd.world_defs: frag.write('emission = {0};'.format(out_emission)) if parse_opacity: frag.write('opacity = {0} -", "tex = {} tex['name'] = tex_name tex['file'] = '' return", "parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_VELVET':", "Revert to mix elif blend == 'BURN': out_col = 'mix({0},", "socket == node.outputs[4]: # Camera return 'vec3(0.0)' # 'vposition' elif", "elif node.type == 'CAMERA': # View Z Depth if socket", "val2) elif op == 'MAXIMUM': out_val = 'max({0}, {1})'.format(val1, val2)", "# Empty texture tex = {} tex['name'] = tex_name tex['file']", "node.color_ramp.elements if len(elems) == 1: return to_vec3(elems[0].color) # Write cols", "node.outputs[2]: return '{0}.b'.format(col) elif node.type == 'SEPXYZ': vec = parse_vector_input(node.inputs[0])", "{1};'.format(res_var, res)) elif st == 'VALUE': res = parse_value(l.from_node, l.from_socket)", "curves = node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type # bezier", "geom global tesc 
global tese global parse_surface global parse_opacity global", "{} parents = [] normal_parsed = False curshader = frag", "KIND, either express or implied. # See the License for", "parse_group_input(node, socket): index = socket_index(node, socket) parent = parents.pop() #", "out_emission = '({0} * 0.5 + {1} * 0.5)'.format(emi1, emi2)", "def is_ascii(s): return len(s) == len(s.encode()) ## def get_rp_renderer(): return", "i, points[i].location[0])) # Map vector return 'mix({0}[{1}], {0}[{1} + 1],", "'bposition' scale = parse_value_input(node.inputs[1]) res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale)", "',' + ar2[1] else: co = ar[1][:-1] post = ')'", "float(parse_value_input(node.inputs[0])) rgb = [0,0,0] blackbody_table_r = [ [2.52432244e+03, -1.06185848e-03, 3.11067539e+00],", "len(elems)): index += ' + ({0} > {1} ? 1", "elif (t < 965.0): rgb[0] = 4.70366907 rgb[1] = 0.0", "(the \"License\"); # you may not use this file except", "= mat_user.data.uv_layers # Second uvmap referenced if len(lays) > 1", "Transmission Ray return '0.0' elif socket == node.outputs[7]: # Ray", "parse_vector_input(inp) elif inp.type == 'VALUE': return parse_value_input(inp) def parse_shader_input(inp): if", "emission_strength = parse_value_input(node.inputs[1]) out_basecol = '({0} * {1})'.format(out_basecol, emission_strength) elif", "= _geom tesc = _tesc tese = _tese parse_surface =", "# out = 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1]) #", "Index curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex') return 'objectInfoIndex' elif socket == node.outputs[3]:", "inp.links[0] if l.from_node.type == 'REROUTE': return parse_value_input(l.from_node.inputs[0]) res_var = write_result(l)", "parse_surface: out_basecol = '({0} + {1})'.format(bc1, bc2) out_roughness = '({0}", "- {1}) * (vec3(1.0) - {0}))));'.format(col1, col2, fac) elif blend", "return 'n' if curshader.shader_type == 'frag' else 'wnormal' elif socket", "index index_var = 
node_name(node.name) + '_i' curshader.write('int {0} = {1};'.format(index_var,", "* {1} * 0.5)'.format(co, scale) if sample_bump: write_bump(node, res) return", "# # Unless required by applicable law or agreed to", "== 'SOFT_LIGHT': out_col = '((1.0 - {2}) * {0} +", "return '{0}[{1}]'.format(cols_var, index_var) else: # Linear # Write facs array", "co = 'bposition' scale = parse_value_input(node.inputs[4]) res = 'tex_brick_f({0} *", "tex['generate_mipmaps'] = True elif interpolation == 'Smart': # Mipmap anisotropic", "lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)' return", "len(elems)): curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2]))", "= parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr) elif", "'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)): return '{0}.a'.format(store_var_name(node)) tex_name =", "y, z) elif node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2", "def parse(nodes, con, vert, frag, geom, tesc, tese, parse_surface=True, parse_opacity=True,", "* t + b[3] # Pass constant return to_vec3([rgb[0], rgb[1],", "= 'bposition' col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) col3 =", "curshader.add_function(c_functions.str_tex_checker) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition'", "{0}.x) / PI2 + 0.5'.format(co) elif grad == 'QUADRATIC_SPHERE': f", "== 'Anisotropic': interpolation = 'Smart' elif texfilter == 'Linear': interpolation", "node.type == 'SEPXYZ': vec = parse_vector_input(node.inputs[0]) if socket == node.outputs[0]:", "# out = 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1]) return", "{uv_name}1 = vec2(0.0); vec2 {uv_name}2 = vec2(0.0);') # Temp curshader.write(f'vec4", 
"# Compute nodes only once global parents global normal_parsed global", "= 'PNG' if new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, converted_path,", "vector_curve(name + '2', vec + '.z', curves[2].points), fac,\\ vector_curve(name +", "elif image.source == \"GENERATED\": unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')", "((vec3(1.0) - {0}) * {1} * {0} + {0} *", "return arm.utils.get_sdk_path() def disp_enabled(): return arm.utils.disp_enabled(arm.make_state.target) def warn(text): arm.log.warn(text) def", "0.5)'.format(val1) elif op == 'FLOOR': out_val = 'floor({0})'.format(val1) elif op", "* 0.5 + {1} * 0.5)'.format(occ1, occ2) out_specular = '({0}", "* cos(theta) out = 'vec3({0}.x * {1} - ({0}.y) *", "= 'dotNV' if socket == node.outputs[0]: # Fresnel curshader.add_function(c_functions.str_fresnel) return", "parse_surface: write_normal(node.inputs[4]) out_basecol = parse_vector_input(node.inputs[0]) elif node.type == 'BSDF_TOON': #", "parse_opacity = _parse_opacity basecol_only = _basecol_only emission_found = False particle_info", "tex_name, to_linear, tex_link=tex_link)) curshader.write_textures -= 1 return res elif node.image", "Testing.. get function parts.. 
ar = res.split('(', 1) pre =", "'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix #", "if sample_bump: write_bump(node, res) return res elif node.type == 'LIGHT_FALLOFF':", "Particle info export def parse(nodes, con, vert, frag, geom, tesc,", "{1})'.format(bc1, bc2) out_roughness = '({0} * 0.5 + {1} *", "{0}_4 = textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name)) sample_bump =", "Extension parameter from the Texture node instead # if node.use_min:", "curshader.write_textures -= 1 return res elif node.image == None: #", "location[0], location[1], location[2]) # use Extension parameter from the Texture", "default to quadratic for now return '1.0' elif node.type ==", "{1} * 4.0)'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return", "# Second uvmap referenced if len(lays) > 1 and node.attribute_name", "'cos({0})'.format(val1) elif op == 'TANGENT': out_val = 'tan({0})'.format(val1) elif op", "'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'VALTORGB': # ColorRamp return", "texCoordBlend.x;') curshader.write(f'if (texCoordBlend.y > 0) {tex_store} += texture({tex_name}, {uv_name}1.xy) *", "= tex_store curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name,", "'0.0' elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/' +", "= (pos - start) * (1.0 / (finish - start))", "matname is None: matname = mat_state.material.name if image is None:", "or st == 'RGBA' or st == 'VECTOR': return res_var", "elif op == 'LOGARITHM': out_val = 'log({0})'.format(val1) elif op ==", "global sample_bump global sample_bump_res global parsed tex_store = store_var_name(node) if", "facs_var = node_name(node.name) + '_facs' curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO:", "return to_vec1(inp.default_value) def parse_value(node, socket): global 
particle_info global sample_bump if", "+ image.name + ' - file extension required for image", "return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket", "n.type == ntype: return n def socket_index(node, socket): for i", "elif op == 'AVERAGE': return '(({0} + {1}) / 2.0)'.format(vec1,", "particle_info['lifetime'] = False particle_info['location'] = False particle_info['size'] = False particle_info['velocity']", "'tex_voronoi({0} * {1}).r'.format(co, scale) if sample_bump: write_bump(node, res) return res", "not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN matrix frag.write('vec3 texn", "parse_vector_input(inp) else: return None def parse_vector_input(inp): if inp.is_linked: l =", "== 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co", "[3.78765709e+03, 9.36026367e-06, 3.98995841e-01] ] blackbody_table_g = [ [-7.50343014e+02, 3.15679613e-04, 4.73464526e-01],", "{0}.b * 0.11) / 3.0) * 2.5)'.format(col) elif node.type ==", "= node.operation if op == 'ADD': out_val = '({0} +", "tex['v_addressing'] = 'clamp' if image.source == 'MOVIE': tex['source'] = 'movie'", "if rotation[2] != 0.0: # ZYX rotation, Z axis for", "node.type == 'CURVE_RGB': # RGB Curves fac = parse_value_input(node.inputs[0]) vec", "else: dotnv = 'dotNV' return 'fresnel({0}, {1})'.format(ior, dotnv) elif node.type", "filepath == '': if image.packed_file is not None: filepath =", "get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN matrix frag.write('vec3 texn =", "inp.type == 'VECTOR': return parse_vector_input(inp) elif inp.type == 'VALUE': return", "VALUE return res_var else: if mat_batch() and inp.is_uniform: return to_uniform(inp)", "= 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co) elif grad ==", "data / copy non-ascii texture unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets',", "bc2, rough2, met2, occ2, 
spec2, opac2, emi2 = parse_shader_input(node.inputs[2]) if", "# Surface if parse_surface or parse_opacity: parsed = {} parents", "scale[0], scale[1], scale[2]) if rotation[2] != 0.0: # ZYX rotation,", "else: co = 'bposition' scale = parse_value_input(node.inputs[3]) res = 'tex_checker_f({0},", "== 'NORMAL': if socket == node.outputs[0]: return to_vec3(node.outputs[0].default_value) elif socket", "'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr) elif node.type == 'GAMMA': out_col", "= '({0} * {1})'.format(out_basecol, emission_strength) elif node.type == 'BSDF_GLASS': if", "index += ' + ({0} > {1} ? 1 :", "None: filepath = './' + image.name has_ext = filepath.endswith(('.jpg', '.png',", "None if do_convert: unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if", "= parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'tex_noise({0} *", "s if curshader.write_textures > 0: s += '_texread' s =", "{1})'.format(out_basecol, emission_strength) elif node.type == 'BSDF_GLASS': if parse_surface: write_normal(node.inputs[3]) out_roughness", "(1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var,", "col2, fac) elif blend == 'LINEAR_LIGHT': out_col = 'mix({0}, {1},", "if image.packed_file is not None: if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath)", "== 'PARTICLE_INFO': if socket == node.outputs[3]: # Location particle_info['location'] =", "= parse_shader_input(node.inputs[1]) bc2, rough2, met2, occ2, spec2, opac2, emi2 =", "{1} * 0.5)'.format(co, scale) if sample_bump: write_bump(node, res) return res", "\"GENERATED\": unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path):", "+ vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl)) sample_bump =", "only for now return 'vcolor' else: # Vector con.add_elem('tex', 'short2norm')", "node.type == 'LIGHT_PATH': if socket 
== node.outputs[0]: # Is Camera", "else: # Link image path to assets # TODO: Khamake", "{1}))'.format(vec1, vec2) elif op == 'CROSS_PRODUCT': return 'cross({0}, {1})'.format(vec1, vec2)", "{1}).r'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type", "col2 = parse_vector_input(node.inputs[2]) blend = node.blend_type if blend == 'MIX':", "+ '3a', vec + '.x', curves[3].points), vector_curve(name + '3b', vec", "parse_vector_input(node.inputs[3]) out_metallic = parse_value_input(node.inputs[4]) out_specular = parse_value_input(node.inputs[5]) # specular_tint =", "elif op == 'CROSS_PRODUCT': return 'cross({0}, {1})'.format(vec1, vec2) elif op", "return 'vec3(dot({0}, {1}))'.format(vec1, vec2) elif op == 'CROSS_PRODUCT': return 'cross({0},", "else 'jpg' tex['file'] = tex['file'].rsplit('.', 1)[0] + '.' + new_ext", "# TODO: Make const for i in range(0, len(elems)): curshader.write('{0}[{1}]", "out_col = 'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac_var) elif", "'_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) index = '0' for", "res elif node.type == 'TEX_POINTDENSITY': return '0.0' elif node.type ==", "fac_inv_var) out_specular = '({0} * {3} + {1} * {2})'.format(spec1,", "socket == node.outputs[6]: # Backfacing return '(1.0 - float(gl_FrontFacing))' elif", "= 'tex_voronoi({0} * {1}).r'.format(co, scale) if sample_bump: write_bump(node, res) return", "tex_store = store_var_name(node) # Pink color for missing texture parsed[tex_store]", "== 'RGB': if node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('vec3", "b = blackbody_table_b[i] t_inv = 1.0 / t rgb[0] =", "{1}, {2})'.format(v[0], v[1], v[2]) def node_by_type(nodes, ntype): for n in", "0.0, 0.0]) elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/'", ".jpg? 
Convert ext to lowercase on windows if arm.utils.get_os() ==", "1.29075375e-01], [4.60124770e+03, 2.89727618e-05, 1.48001316e-01], [3.78765709e+03, 9.36026367e-06, 3.98995841e-01] ] blackbody_table_g =", "1)[0] + '.' + new_ext if image.packed_file is not None", "'2', '3', '4'] else: ext = ['2', '1', '4', '3']", "float(gl_FrontFacing))' elif socket == node.outputs[7]: # Pointiness return '0.0' elif", "+ {2} * (vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1,", "+ {1})'.format(bc1, bc2) out_roughness = '({0} * 0.5 + {1}", "'BSDF_TRANSLUCENT': if parse_surface: write_normal(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 -", "node.type == 'COMBRGB': r = parse_value_input(node.inputs[0]) g = parse_value_input(node.inputs[1]) b", "f = '0.0' elif grad == 'SPHERICAL': f = 'max(1.0", "Index particle_info['index'] = True return 'p_index' if arm.utils.get_rp().arm_particles == 'On'", "info export def parse(nodes, con, vert, frag, geom, tesc, tese,", "# distortion = parse_value_input(node.inputs[3]) res = 'tex_noise({0} * {1})'.format(co, scale)", "scale[2] != 1.0: out = '({0} * vec3({1}, {2}, {3}))'.format(out,", "channel out_occlusion = parse_vector_input(node.inputs[0]) + '.r' elif node.type == 'BSDF_ANISOTROPIC':", "+ '_fac' fac_inv_var = node_name(node.name) + '_fac_inv' curshader.write('{0}float {1} =", "'uv_layers'): lays = mat_user.data.uv_layers # Second uvmap referenced if len(lays)", "parse_value_input(node.inputs[1]) res = 'tex_magic({0} * {1} * 4.0)'.format(co, scale) if", "= '0.0' elif grad == 'DIAGONAL': f = '({0}.x +", "parse_shader_input(node.inputs[0]) bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1])", "link='_cameraPosition') return 'distance(eye, wposition)' elif node.type == 'FRESNEL': curshader.add_function(c_functions.str_fresnel) ior", "= '0.0' if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): if", "{3} + {1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var) out_metallic =", 
"{2})'.format(col1, col2, fac_var) elif blend == 'SCREEN': out_col = '(vec3(1.0)", "parse_value_input(node.inputs[0]) interp = node.color_ramp.interpolation elems = node.color_ramp.elements if len(elems) ==", "res = 'tex_brick_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res)", "st == 'VECTOR': return res_var else: # VALUE return 'vec3({0})'.format(res_var)", "# Unparsed node if not is_parsed(res_var): parsed[res_var] = True st", "'((({0}.r * 0.3 + {0}.g * 0.59 + {0}.b *", "0.5)'.format(emi1, emi2) if parse_opacity: out_opacity = '({0} * 0.5 +", "{1})'.format(vec1, vec2) elif op == 'SUBTRACT': return '({0} - {1})'.format(vec1,", "{0} = {1};'.format(res_var, res)) # Normal map already parsed, return", "> 1 and node.uv_map == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x,", "sample_bump_res = tex_store curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store,", "and disp_enabled() and node.inputs[2].is_linked: parsed = {} parents = []", "else 'vec3(0.0)' elif socket == node.outputs[6]: # Angular Velocity particle_info['angular_velocity']", "global normal_parsed global frag if basecol_only: return if inp.is_linked ==", "0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_3 = {1}{2}", "linear tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True elif interpolation ==", "curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png')", "elif node.type == 'HUE_SAT': curshader.add_function(c_functions.str_hue_sat) hue = parse_value_input(node.inputs[0]) sat =", "return parse_vector_input(node.inputs[1]) else: #space = node.space #map = node.uv_map #", "curshader.add_function(c_functions.str_fresnel) return 'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend, dotnv) elif", "parse_opacity: 
out_opacity = parse_value_input(node.inputs[1]) else: return parse_group(node, socket) elif node.type", "curshader.write(f'if (texCoordBlend.x > 0) {tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;')", "# Convert image if do_convert: new_ext = 'png' if (ext", "x = parse_value_input(node.inputs[0]) y = parse_value_input(node.inputs[1]) z = parse_value_input(node.inputs[2]) return", "'vec3({0}.x * {1} - ({0}.y) * {2}, {0}.x * {2}", "* {0} + {2} * {0} / {1}))'.format(col1, col2, fac_var)", "ar2 = ar[1].split(',', 1) co = ar2[0] post = ','", "val2) elif op == 'POWER': out_val = 'pow({0}, {1})'.format(val1, val2)", "res = 'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale) if sample_bump:", "socket == node.outputs[1]: # TODO: is parse_value path preferred? nor", "{2}) * {0} + {2} * {0} / {1}))'.format(col1, col2,", "parse_value_input(node.inputs[0]) g = parse_value_input(node.inputs[1]) b = parse_value_input(node.inputs[2]) return 'vec3({0}, {1},", "1) co = ar2[0] post = ',' + ar2[1] else:", "store_var_name(node) + '_bump' # Testing.. get function parts.. 
ar =", "or scale[2] != 1.0: out = '({0} * vec3({1}, {2},", "cache when file changes if not os.path.isfile(converted_path): fmt = 'PNG'", "== 'MAXIMUM': out_val = 'max({0}, {1})'.format(val1, val2) elif op ==", "# parse_volume_input(node.inputs[1]) # Displacement if _parse_displacement and disp_enabled() and node.inputs[2].is_linked:", "{0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res)) res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1,", "!= None: parse_output(output_node, con, vert, frag, geom, tesc, tese, parse_surface,", "global vert global frag global geom global tesc global tese", "'vec3(0.0)' # Tangent Normal elif node.type == 'OBJECT_INFO': return 'wposition'", "= node_name(node.name) + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) col1", "out_group def parse_group_input(node, socket): index = socket_index(node, socket) parent =", "i in range(1, len(points)): index += ' + ({0} >", "objectInfoMaterialIndex', link='_objectInfoMaterialIndex') return 'objectInfoMaterialIndex' elif socket == node.outputs[4]: # Random", "- (vec3(1.0 - {2}) + {2} * (vec3(1.0) - {1}))", "# Get index fac_var = name + '_fac' curshader.write('float {0}", "'max({0}, {1} * {2})'.format(col1, col2, fac_var) elif blend == 'OVERLAY':", "-texn.y;') frag.add_include('std/normals.glsl') frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);') frag.write('n =", "'NEW_GEOMETRY': if socket == node.outputs[6]: # Backfacing return '(1.0 -", "= 'atan({0}, {1})'.format(val1, val2) if node.use_clamp: return 'clamp({0}, 0.0, 1.0)'.format(out_val)", "to /unpacked filepath += '.raw' elif image.source == \"GENERATED\": unpack_path", "= node.gradient_type if grad == 'LINEAR': f = '{0}.x'.format(co) elif", "socket == node.outputs[3]: # True Normal return 'n' if curshader.shader_type", "if res == None: return None curshader.write('float {0} = {1};'.format(res_var,", "'linear' tex['mipmap_filter'] = 'no' tex['generate_mipmaps'] = False return tex def", "not 
os.path.exists(unpack_path): os.makedirs(unpack_path) filepath = os.path.join(unpack_path, image.name + \".jpg\") arm.utils.convert_image(image,", "'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' if socket == node.outputs[0]:", "out_opacity = parse_value_input(node.inputs[1]) else: return parse_group(node, socket) elif node.type ==", "elif node.type == 'TEX_ENVIRONMENT': # Pass through return to_vec3([0.0, 0.0,", "out_val = 'sqrt({0})'.format(val1) elif op == 'ABSOLUTE': out_val = 'abs({0})'.format(val1)", "# Revert to mix elif blend == 'SOFT_LIGHT': out_col =", "2.56245413e-05, 1.20753416e+00], [-1.42546105e+03, -4.01730887e-05, 1.44002695e+00], [-1.18134453e+03, -2.18913373e-05, 1.30656109e+00], [-5.00279505e+02, -4.59745390e-06,", "out_opacity = parse_value_input(node.inputs[18]) elif node.type == 'BSDF_DIFFUSE': if parse_surface: write_normal(node.inputs[2])", "1.0 - texCoord.y, 0.0)' elif node.type == 'BUMP': # Interpolation", "use Normal Map node with Armory PBR, connect Image Texture", "== 'MAPPING': out = parse_vector_input(node.inputs[0]) scale = node.inputs['Scale'].default_value rotation =", "res, 0.1) return res elif node.type == 'TEX_POINTDENSITY': return '0.0'", "node.type == 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'MIX_SHADER':", "out_val = 'min({0}, {1})'.format(val1, val2) elif op == 'MAXIMUM': out_val", "Second uvmap referenced if len(lays) > 1 and node.attribute_name ==", "inp.links[0] if l.from_node.type == 'REROUTE': return parse_vector_input(l.from_node.inputs[0]) res_var = write_result(l)", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "= parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type", "sample_bump: write_bump(node, res) return res elif node.type == 'LIGHT_FALLOFF': #", "+ b[2]) * t + b[3] 
# Pass constant return", "= 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1]) # if node.use_max:", "language governing permissions and # limitations under the License. #", "out_emission def parse_shader(node, socket): global emission_found out_basecol = 'vec3(0.8)' out_roughness", "out_basecol = '({0} + {1})'.format(bc1, bc2) out_roughness = '({0} *", "'3', '4'] else: ext = ['2', '1', '4', '3'] curshader.write('float", "'RGBA32': # tex['format'] = image_format interpolation = image_node.interpolation rpdat =", "'NORMAL_MAP': if curshader == tese: return parse_vector_input(node.inputs[1]) else: #space =", "if l.from_node.type == 'REROUTE': return parse_displacement_input(l.from_node.inputs[0]) return parse_vector_input(inp) else: return", "blackbody_table_g = [ [-7.50343014e+02, 3.15679613e-04, 4.73464526e-01], [-1.00402363e+03, 1.29189794e-04, 9.08181524e-01], [-1.22075471e+03,", "mat_batch() and inp.is_uniform: return to_uniform(inp) else: return to_vec3(inp.default_value) def parse_vector(node,", "= tex['file'].rsplit('.', 1)[0] + '.' 
+ new_ext if image.packed_file is", "frag.write('occlusion = {0};'.format(out_occlusion)) frag.write('specular = {0};'.format(out_specular)) if '_Emission' in wrd.world_defs:", "f = '{0}.x'.format(co) elif grad == 'QUADRATIC': f = '0.0'", "* 0.5)'.format(opac1, opac2) elif node.type == 'BSDF_PRINCIPLED': if parse_surface: write_normal(node.inputs[19])", "Already fetched if is_parsed(store_var_name(node)): return '{0}.a'.format(store_var_name(node)) tex_name = safesrc(node.name) tex", "particle_info['age'] = True return 'p_age' if arm.utils.get_rp().arm_particles == 'On' else", "!= 0.0 or location[2] != 0.0: out = '({0} +", "mat_state import arm.material.cycles_functions as c_functions import shutil emission_found = False", "* 0.5 + {1} * 0.5)'.format(met1, met2) out_occlusion = '({0}", "node.color_ramp.interpolation elems = node.color_ramp.elements if len(elems) == 1: return to_vec3(elems[0].color)", ": 0)'.format(fac_var, points[i].location[0]) # Write index index_var = name +", "scale = parse_value_input(node.inputs[2]) nor = parse_vector_input(node.inputs[3]) return '(vec3({0}) * {1})'.format(height,", "node.type == 'DISPLACEMENT': height = parse_value_input(node.inputs[0]) midlevel = parse_value_input(node.inputs[1]) scale", "= node.projection == 'BOX' if triplanar: curshader.write(f'vec3 texCoordBlend = vec3(0.0);", "# ColorRamp return '1.0' elif node.type == 'MATH': val1 =", "range(0, len(node.outputs)): if node.outputs[i] == socket: return i def node_name(s):", "sheen_tint = parse_vector_input(node.inputs[11]) # clearcoat = parse_vector_input(node.inputs[12]) # clearcoat_rough =", "math.sin(a)) if location[0] != 0.0 or location[1] != 0.0 or", "= node.uv_map # Color parse_normal_map_color_input(node.inputs[1], node.inputs[0]) return None elif node.type", "gamma) elif node.type == 'HUE_SAT': curshader.add_function(c_functions.str_hue_sat) hue = parse_value_input(node.inputs[0]) sat", "* 0.5)'.format(occ1, occ2) out_specular = '({0} * 0.5 + {1}", "_tesc tese = 
_tese parse_surface = _parse_surface parse_opacity = _parse_opacity", "range(0, len(elems)): curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1],", "snoise256', link='$noise256.png') curshader.add_function(c_functions.str_tex_noise) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co", "array facs_var = node_name(node.name) + '_facs' curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) #", "arm.log.warn(matname + '/' + image.name + ' - invalid file", "return parse_vector_input(inp) else: return None def parse_vector_input(inp): if inp.is_linked: l", "fac_var, fac_inv_var) out_roughness = '({0} * {3} + {1} *", "1.09090465e+00] ] blackbody_table_b = [ [0.0, 0.0, 0.0, 0.0], [0.0,", "'0.0' elif node.type == 'SEPRGB': col = parse_vector_input(node.inputs[0]) if socket", "output_node == None: return inp = output_node.inputs[index] parents.append(node) out_group =", "'WAVELENGTH': curshader.add_function(c_functions.str_wavelength_to_rgb) wl = parse_value_input(node.inputs[0]) # Roughly map to cycles", ">= 12000): rgb[0] = 0.826270103 rgb[1] = 0.994478524 rgb[2] =", "= tex_name tex['file'] = '' return '{0}.a'.format(texture_store(node, tex, tex_name, True,", "parse_value_input(node.inputs[0]) # Height multiplier # distance = parse_value_input(node.inputs[1]) sample_bump =", "tese for displacement global con global vert global frag global", "if parse_surface: frag.write('basecol = {0};'.format(out_basecol)) frag.write('roughness = {0};'.format(out_roughness)) frag.write('metallic =", "Base color out_basecol = parse_vector_input(node.inputs[0]) # Occlusion out_occlusion = parse_value_input(node.inputs[2])", "'fract({0})'.format(val1) elif op == 'MODULO': # out_val = 'float({0} %", "1.56626022 elif (t < 965.0): rgb[0] = 4.70366907 rgb[1] =", "has_ext = filepath.endswith(('.jpg', '.png', '.hdr')) if not has_ext: # Raw", "CELLS res = 'tex_voronoi({0} * {1}).r'.format(co, scale) if 
sample_bump: write_bump(node,", "node_name(node.name) + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) index =", "or st == 'RGBA' or st == 'VECTOR': res =", "sat, val, fac) elif node.type == 'INVERT': fac = parse_value_input(node.inputs[0])", "return res elif node.type == 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if node.inputs[0].is_linked: co", "rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0]) bc2, rough2,", "node.inputs[0].is_linked else 'const ' fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name)", "tex_name, True, tex_link=tex_link)) else: tex_store = store_var_name(node) # Pink color", "fac) elif blend == 'LINEAR_LIGHT': out_col = 'mix({0}, {1}, {2})'.format(col1,", "0.0]) elif node.type == 'TEX_SKY': # Pass through return to_vec3([0.0,", "parse_shader_input(node.inputs[2]) if parse_surface: out_basecol = '({0} * {3} + {1}", "inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_shader_input(l.from_node.inputs[0])", "law or agreed to in writing, software # distributed under", "{0} * (vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0) -", "'time': curshader.add_uniform('float time', link='_time') return 'time' else: return '0.0' elif", "'Linear' elif texfilter == 'Point': interpolation = 'Closest' # TODO:", "+ 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') if node.inputs[0].is_linked: co =", "(vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0) - {0}))));'.format(col1, col2,", "file path') return None # Reference image name texpath =", "for image name') return None ext = s[1].lower() do_convert =", "col2, fac_var) # Revert to mix elif blend == 'SOFT_LIGHT':", "vec2(0.0); vec2 {uv_name}2 = vec2(0.0);') # Temp curshader.write(f'vec4 {tex_store} =", "# Normal map already parsed, return elif l.from_node.type == 'NORMAL_MAP':", "# Is Strand # Intercept # Thickness return '0.5' elif", "(vec3(1.0 
- {2}) + {2} * (vec3(1.0) - {1})) *", "t == 'RGB' or t == 'RGBA' or t ==", "r[1] * t + r[2] rgb[1] = g[0] * t_inv", "== 'HUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) #", "= parse_displacement_input(node.inputs[2]) curshader.write('vec3 disp = {0};'.format(out_disp)) def parse_group(node, socket): #", "Interpolation strength strength = parse_value_input(node.inputs[0]) # Height multiplier # distance", "tex['file']) # TODO: delete cache when file changes if not", "= socket_index(node, socket) output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT') if output_node ==", "res = '(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res) return", "return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_shader(node,", "= parse_value_input(strength_input) if strength != '1.0': frag.write('n.xy *= {0};'.format(strength)) frag.write('n", "out_basecol = '({0} * {3} + {1} * {2})'.format(bc1, bc2,", "_geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only): global parsed #", "opac2, emi2 = parse_shader_input(node.inputs[2]) if parse_surface: out_basecol = '({0} *", "- ({0}), {1})'.format(out_col, fac) elif node.type == 'MIX_RGB': fac =", "{0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res)) curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0,", "= 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_wave_f({0} * {1})'.format(co,", "res_var_name(node, socket): return node_name(node.name) + '_' + safesrc(socket.name) + '_res'", "= 'round({0})'.format(val1) out_val = 'floor({0} + 0.5)'.format(val1) elif op ==", "= mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): lays = mat_user.data.uv_layers # Second", "!= None and node.image.colorspace_settings.name == 'sRGB' res = '{0}.rgb'.format(texture_store(node, tex,", "if socket == node.outputs[0]: # Color con.add_elem('col', 'short4norm') # Vcols", "sample_bump = False 
nor = parse_vector_input(node.inputs[3]) if sample_bump_res != '':", "if image_node.extension != 'REPEAT': # Extend or clip tex['u_addressing'] =", "'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co =", "else: global parsed tex_store = store_var_name(node) # Pink color for", "len(node.outputs)): if node.outputs[i] == socket: return i def node_name(s): for", "inp.links[0] if l.from_node.type == 'REROUTE': return parse_displacement_input(l.from_node.inputs[0]) return parse_vector_input(inp) else:", "'short2norm') return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)' return 'vec3(texCoord.x, 1.0", "r = blackbody_table_r[i] g = blackbody_table_g[i] b = blackbody_table_b[i] t_inv", "node.inputs[0].is_linked: uv_name = parse_vector_input(node.inputs[0]) uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name)", "* 0.5)'.format(rough1, rough2) out_metallic = '({0} * 0.5 + {1}", "return '0.0' elif node.type == 'OBJECT_INFO': if socket == node.outputs[2]:", "inp = parent.inputs[index] res = parse_input(inp) parents.append(parent) # Return to", "array ys_var = name + '_ys' curshader.write('float {0}[{1}];'.format(ys_var, len(points))) #", "'uv_layers'): # No uvlayers for Curve lays = mat_user.data.uv_layers #", "node.outputs[3]: # Location particle_info['location'] = True return 'p_location' if arm.utils.get_rp().arm_particles", "return res elif node.type == 'TEX_POINTDENSITY': # Pass through return", "or implied. 
# See the License for the specific language", "g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name)) else: curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store,", "res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale) if sample_bump: write_bump(node, res)", "== 'SEPXYZ': vec = parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return", "'1.0' elif socket == node.outputs[3]: # Is Glossy Ray return", "# View Z Depth if socket == node.outputs[1]: curshader.add_include('std/math.glsl') curshader.add_uniform('vec2", "output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT') if output_node == None: return inp", "ext[1], ext[2], ext[3])) curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2 *=", "= '({0} * {3} + {1} * {2})'.format(met1, met2, fac_var,", "def parse_vector(node, socket): global particle_info global sample_bump global sample_bump_res #", "index_var, fac_var, facs_var) def write_normal(inp): if inp.is_linked and inp.links[0].from_node.type !=", "def parse_displacement_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type ==", "_vert, _frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only): global", "== 'MINIMUM': out_val = 'min({0}, {1})'.format(val1, val2) elif op ==", "+ {2} * ((vec3(1.0) - {0}) * {1} * {0}", "global parsed res_var = res_var_name(l.from_node, l.from_socket) # Unparsed node if", "- {0}.z * {2}, {0}.y * {2} + {0}.z *", "/ ({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var)", "'wnormal' elif socket == node.outputs[4]: # Incoming return 'vVec' elif", "vec3({1}))'.format(out_col, gamma) elif node.type == 'HUE_SAT': curshader.add_function(c_functions.str_hue_sat) hue = parse_value_input(node.inputs[0])", "col = parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.r'.format(col) elif", "nn else: return to_vec3(socket.default_value) elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if", 
"socket == node.outputs[1]: curshader.add_include('std/math.glsl') curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj') return 'linearize(gl_FragCoord.z, cameraProj)'", "{1} * {2})'.format(emi1, emi2, fac_var, fac_inv_var) if parse_opacity: out_opacity =", "'({0} + {1})'.format(bc1, bc2) out_roughness = '({0} * 0.5 +", "link='_objectInfoMaterialIndex') return 'objectInfoMaterialIndex' elif socket == node.outputs[4]: # Random curshader.add_uniform('float", "curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make const for i in", "location = node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0, 0.0, 0.0] if", "= 'linear' tex['mag_filter'] = 'linear' tex['mipmap_filter'] = 'no' tex['generate_mipmaps'] =", "= _parse_surface parse_opacity = _parse_opacity basecol_only = _basecol_only emission_found =", "elif node.type == 'NORMAL_MAP': if curshader == tese: return parse_vector_input(node.inputs[1])", "Blender Foundation # # Licensed under the Apache License, Version", "if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT': normal_res = parse_vector_input(inp) if", "'DODGE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert", "return 'vec3(0.0)' elif node.type == 'UVMAP': #instance = node.from_instance con.add_elem('tex',", "for surface / tese for displacement global con global vert", "'vec3(tex_noise({0} * {1}), tex_noise({0} * {1} + 0.33), tex_noise({0} *", "ar2[1] else: co = ar[1][:-1] post = ')' curshader.write('float {0}_1", "{2} * ((vec3(1.0) - {0}) * {1} * {0} +", "= parse_value_input(node.inputs[1]) op = node.operation if op == 'ADD': out_val", "- file extension required for image name') return None ext", "= {0};'.format(out_emission)) if parse_opacity: frag.write('opacity = {0} - 0.0002;'.format(out_opacity)) #", "to_vec1(node.outputs[0].default_value) elif node.type == 'WIREFRAME': #node.use_pixel_size # size = parse_value_input(node.inputs[0])", "' - file not found(' + 
filepath + ')') return", "if l.from_node.type == 'REROUTE': return parse_shader_input(l.from_node.inputs[0]) return parse_shader(l.from_node, l.from_socket) else:", "parse_vector_input(node.inputs[0]) scale = node.inputs['Scale'].default_value rotation = node.inputs['Rotation'].default_value location = node.inputs['Location'].default_value", "global parsed return s in parsed def res_var_name(node, socket): return", "vec4(0.0, 0.0, 0.0, 0.0);') curshader.write(f'if (texCoordBlend.x > 0) {tex_store} +=", "elif socket == node.outputs[1]: return '{0}.g'.format(col) elif socket == node.outputs[2]:", "'n' if curshader.shader_type == 'frag' else 'wnormal' elif socket ==", "out_val = 'mod({0}, {1})'.format(val1, val2) elif op == 'SINE': out_val", "co, post, scl)) curshader.write('float {0}_3 = {1}{2} + vec3(0.0, -{4},", "node.outputs[1]: # TODO: is parse_value path preferred? nor = parse_vector_input(node.inputs[0])", "0 def is_ascii(s): return len(s) == len(s.encode()) ## def get_rp_renderer():", "return parse_shader_input(inp) elif inp.type == 'RGB': return parse_vector_input(inp) elif inp.type", "parse_vector_input(node.inputs[16]) if node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0: out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17]))", "Make const for i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(ys_var,", "False sample_bump_res = '' wrd = bpy.data.worlds['Arm'] # Surface if", "res, 0.1) return res elif node.type == 'TEX_POINTDENSITY': # Pass", "0.5'.format(co) elif grad == 'QUADRATIC_SPHERE': f = '0.0' elif grad", "# Copyright 2011-2013 Blender Foundation # # Licensed under the", "* texCoordBlend.x;') curshader.write(f'if (texCoordBlend.y > 0) {tex_store} += texture({tex_name}, {uv_name}1.xy)", "if inp.type == 'VALUE': # Unlinked reroute return to_vec3([0.0, 0.0,", "out_metallic = parse_value_input(node.inputs[4]) # Normal if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type ==", "x * 
sin(theta) + y * cos(theta) out = 'vec3({0}.x", "'FLOOR': out_val = 'floor({0})'.format(val1) elif op == 'CEIL': out_val =", "== 'SHADER': return parse_shader_input(inp) elif inp.type == 'RGB': return parse_vector_input(inp)", "Armory PBR, connect Image Texture directly') parse_normal_map_color_input(node.inputs[5]) # Emission if", "== 'VALUE': return parse_value_input(inp) def parse_shader_input(inp): if inp.is_linked: l =", "emi2, fac_var, fac_inv_var) if parse_opacity: out_opacity = '({0} * {3}", "mat_get_material_users() if mat_users != None and mat in mat_users: mat_user", "blend == 'SUBTRACT': out_col = 'mix({0}, {0} - {1}, {2})'.format(col1,", "== 'ADD_SHADER': bc1, rough1, met1, occ1, spec1, opac1, emi1 =", "== 'DIFFERENCE': out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2,", "4 elif(t >= 1902.0): i = 3 elif(t >= 1449.0):", "parse_vector_input(node.inputs[15]) # transmission_roughness = parse_vector_input(node.inputs[16]) if node.inputs[17].is_linked or node.inputs[17].default_value[0] !=", "{0}_3 = {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post,", "in wrd.world_defs: frag.write('emission = {0};'.format(out_emission)) if parse_opacity: frag.write('opacity = {0}", "0.0: out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found = True # clearcoar_normal =", "= tese else: curshader = vert out_disp = parse_displacement_input(node.inputs[2]) curshader.write('vec3", "vec3(1.0))'.format(out_col) else: return out_col elif node.type == 'BLACKBODY': t =", "op == 'ARCSINE': out_val = 'asin({0})'.format(val1) elif op == 'ARCCOSINE':", "elif op == 'LESS_THAN': out_val = 'float({0} < {1})'.format(val1, val2)", "blend == 'DIVIDE': out_col = '(vec3((1.0 - {2}) * {0}", "Surface if parse_surface or parse_opacity: parsed = {} parents =", "0.0)' elif node.type == 'RGB': if node.arm_material_param: nn = 'param_'", "{1} = 1.0 - {2};'.format(prefix, fac_inv_var, fac_var)) bc1, rough1, met1,", "{1} * {2})'.format(col1, 
col2, fac_var) elif blend == 'OVERLAY': out_col", "'MIX': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) elif blend", "return '({0} - {1})'.format(vec1, vec2) elif op == 'AVERAGE': return", "node.inputs[6].default_value != 0.0: out_emission = parse_value_input(node.inputs[6]) emission_found = True if", "# Write cols array cols_var = node_name(node.name) + '_cols' curshader.write('vec3", "- {2}) * {0} + {2} * {0} / {1}))'.format(col1,", "vert = _vert frag = _frag geom = _geom tesc", "== 'COMBXYZ': x = parse_value_input(node.inputs[0]) y = parse_value_input(node.inputs[1]) z =", "'MULTIPLY': out_val = '({0} * {1})'.format(val1, val2) elif op ==", "else: if mat_texture_grad(): curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store,", "0.0)'.format(co) res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res)", "None # Particle info export def parse(nodes, con, vert, frag,", "curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const for i in", "+ '_store' def texture_store(node, tex, tex_name, to_linear=False, tex_link=None): global sample_bump", "if not os.path.exists(unpack_path): os.makedirs(unpack_path) unpack_filepath = os.path.join(unpack_path, tex['file']) if do_convert:", "return res elif node.type == 'TEX_POINTDENSITY': return '0.0' elif node.type", "s.replace('_', '_x') return s ## def make_texture(image_node, tex_name, matname=None): tex", "parse_surface: out_basecol = '({0} * {3} + {1} * {2})'.format(bc1,", "{2}, {3}))'.format(out, node.min[0], node.min[1]) # if node.use_max: # out =", "group return res def parse_input(inp): if inp.type == 'SHADER': return", "subsurface = parse_vector_input(node.inputs[1]) # subsurface_radius = parse_vector_input(node.inputs[2]) # subsurface_color =", "parse_value_input(node.inputs[0]) s = parse_value_input(node.inputs[1]) v = parse_value_input(node.inputs[2]) return 'hsv_to_rgb(vec3({0}, {1},", "op == 'ADD': out_val = '({0} + 
{1})'.format(val1, val2) elif", "1.0))'.format(f) if sample_bump: write_bump(node, res) return res elif node.type ==", "node_by_type(nodes, ntype): for n in nodes: if n.type == ntype:", "= 'acos({0})'.format(val1) elif op == 'ARCTANGENT': out_val = 'atan({0})'.format(val1) elif", "val2) if node.use_clamp: return 'clamp({0}, 0.0, 1.0)'.format(out_val) else: return out_val", "_con, _vert, _frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only):", "else: curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name)) if sample_bump:", "'1.0' out_specular = '1.0' out_opacity = '1.0' out_emission = '0.0'", "{0}.y) * 0.5'.format(co) elif grad == 'RADIAL': f = 'atan({0}.y,", "'1', vec + '.y', curves[1].points), vector_curve(name + '2', vec +", "vec + '.x', curves[3].points), vector_curve(name + '3b', vec + '.y',", "# Index particle_info['index'] = True return 'p_index' if arm.utils.get_rp().arm_particles ==", "= rotation[2] # x * cos(theta) - y * sin(theta)", "= True elif interpolation == 'Smart': # Mipmap anisotropic tex['min_filter']", "write_bump(node, res) return res elif node.type == 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if", "scale[1] != 1.0 or scale[2] != 1.0: out = '({0}", "#instance = node.from_instance if socket == node.outputs[0]: # Generated -", "out_col = '({0} + {2} * (2.0 * ({1} -", "node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' col1 =", "node.type == 'SUBSURFACE_SCATTERING': if parse_surface: write_normal(node.inputs[4]) out_basecol = parse_vector_input(node.inputs[0]) elif", "write_bump(node, res) return res elif node.type == 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if", "else: dotnv = 'dotNV' if socket == node.outputs[0]: # Fresnel", "index fac_var = node_name(node.name) + '_fac' curshader.write('float {0} = {1};'.format(fac_var,", "= parse_shader_input(node.inputs[0]) bc2, rough2, met2, occ2, spec2, 
opac2, emi2 =", "= safesrc(node.name) tex = make_texture(node, tex_name) tex_link = node.name if", "'_xs' curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO: Make const for i", "node.arm_material_param else None if tex != None: curshader.write_textures += 1", "node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type # bezier curve return", "arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket == node.outputs[5]: #", "node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co", "== 'TEX_GRADIENT': if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co =", "the License for the specific language governing permissions and #", "'ARCSINE': out_val = 'asin({0})'.format(val1) elif op == 'ARCCOSINE': out_val =", "= {2};'.format(facs_var, i, elems[i].position)) # Mix color # float f", "node.outputs[0]: # Is Camera Ray return '1.0' elif socket ==", "'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) filepath = os.path.join(unpack_path, image.name", "g[1] * t + g[2] rgb[2] = ((b[0] * t", "curves = node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0},", "== 'SQRT': out_val = 'sqrt({0})'.format(val1) elif op == 'ABSOLUTE': out_val", "i = 4 elif(t >= 1902.0): i = 3 elif(t", "node.uv_map # Color parse_normal_map_color_input(node.inputs[1], node.inputs[0]) return None elif node.type ==", "out_emission = '({0} * {3} + {1} * {2})'.format(emi1, emi2,", "node.outputs[3]: # Is Glossy Ray return '1.0' elif socket ==", "= False particle_info = {} particle_info['index'] = False particle_info['age'] =", "= parse_shader_input(node.inputs[0]) if parse_surface: frag.write('basecol = {0};'.format(out_basecol)) frag.write('roughness = {0};'.format(out_roughness))", "socket): return node_name(node.name) + '_' + safesrc(socket.name) + '_res' def", "= ar[1].split(',', 
1) co = ar2[0] post = ',' +", "clearcoat = parse_vector_input(node.inputs[12]) # clearcoat_rough = parse_vector_input(node.inputs[13]) # ior =", "== 'BSDF_GLOSSY': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness =", "{0}))'.format(col1, col2, fac_var) elif blend == 'DIVIDE': out_col = '(vec3((1.0", "== 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D", "tex['file'].rsplit('.', 1)[0] + '.' + new_ext if image.packed_file is not", "* {1})'.format(val1, val2) elif op == 'DIVIDE': out_val = '({0}", "tese: return parse_vector_input(node.inputs[1]) else: #space = node.space #map = node.uv_map", "0.0] if scale[0] != 1.0 or scale[1] != 1.0 or", "col2, fac_var) elif blend == 'SCREEN': out_col = '(vec3(1.0) -", "1 else: i = 0 r = blackbody_table_r[i] g =", "'.' + s[1].lower())) else: arm.assets.add(arm.utils.asset_path(filepath)) # if image_format != 'RGBA32':", "Already fetched if is_parsed(store_var_name(node)): return '{0}.rgb'.format(store_var_name(node)) tex_name = node_name(node.name) tex", "elif blend == 'LINEAR_LIGHT': out_col = 'mix({0}, {1}, {2})'.format(col1, col2,", "elif socket == node.outputs[6]: # Reflection return 'vec3(0.0)' elif node.type", "] if (t >= 12000): rgb[0] = 0.826270103 rgb[1] =", "= pow({0}.rgb, vec3(2.2));'.format(tex_store)) return tex_store def write_bump(node, res, scl=0.001): global", "# distributed under the License is distributed on an \"AS", "+ {0}.y) * 0.5'.format(co) elif grad == 'RADIAL': f =", "return None def parse_vector_input(inp): if inp.is_linked: l = inp.links[0] if", "elif st == 'VALUE': res = parse_value(l.from_node, l.from_socket) if res", "Random curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom') return 'objectInfoRandom' elif node.type == 'PARTICLE_INFO':", "{1};'.format(fac_var, fac)) 
col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) blend =", "arm.utils.convert_image(image, unpack_filepath, file_format=fmt) else: # Write bytes if size is", "write_bump(node, res, scl=0.001): global sample_bump global sample_bump_res sample_bump_res = store_var_name(node)", "'BRIGHTCONTRAST': out_col = parse_vector_input(node.inputs[0]) bright = parse_value_input(node.inputs[1]) contr = parse_value_input(node.inputs[2])", "'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac_var) elif blend ==", "socket == node.outputs[1]: # Facing return '(1.0 - pow({0}, ({1}", "fac) elif node.type == 'CURVE_RGB': # RGB Curves fac =", "== 'Linear': interpolation = 'Linear' elif texfilter == 'Point': interpolation", "if inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE': return", "0.0]) elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv =", "Size particle_info['size'] = True return '1.0' elif node.type == 'VALUE':", "return None else: return parse_group(node, socket) elif node.type == 'GROUP_INPUT':", "to glossy out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic =", "array facs_var = name + '_xs' curshader.write('float {0}[{1}];'.format(facs_var, len(points))) #", "global particle_info global sample_bump global sample_bump_res con = _con vert", "Empty texture tex = {} tex['name'] = tex_name tex['file'] =", "curshader.write('n = {0};'.format(normal_res)) def is_parsed(s): global parsed return s in", "0.0, 0.0]) elif node.type == 'TEX_SKY': # Pass through return", "{3})'.format(co, col1, col2, scale) if sample_bump: write_bump(node, res) return res", "rgb = [0,0,0] blackbody_table_r = [ [2.52432244e+03, -1.06185848e-03, 3.11067539e+00], [3.37763626e+03,", "-2.60561875e-04, 
-1.41761141e-02], [-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01], [6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01]", "{1} ? 1 : 0)'.format(fac_var, elems[i].position) # Write index index_var", "Depth return '0.0' elif socket == node.outputs[9]: # Transparent Depth", "parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' fac_inv_var = node_name(node.name) +", "# Revert to mix elif blend == 'HUE': out_col =", "+ r[1] * t + r[2] rgb[1] = g[0] *", "= parse_value_input(node.inputs[1]) out_specular = '0.0' elif node.type == 'BSDF_GLOSSY': if", "col2, fac_var) # Revert to mix elif blend == 'VALUE':", "2.0 - 1.0;'.format(parse_vector_input(inp))) if strength_input != None: strength = parse_value_input(strength_input)", "curshader # Active shader - frag for surface / tese", "the Texture node instead # if node.use_min: # out =", "s in parsed def res_var_name(node, socket): return node_name(node.name) + '_'", "image.packed_file is not None: if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) !=", "+= texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;') curshader.write(f'if (texCoordBlend.y > 0) {tex_store}", "* {0}.x + {0}.y * {0}.y + {0}.z * {0}.z),", "fac = parse_value_input(node.inputs[3]) col = parse_vector_input(node.inputs[4]) return 'hue_sat({0}, vec4({1}-0.5, {2},", "+ 1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var) def write_normal(inp):", "node.type == 'RGBTOBW': col = parse_vector_input(node.inputs[0]) return '((({0}.r * 0.3", "'{0}.a'.format(store_var_name(node)) tex_name = safesrc(node.name) tex = make_texture(node, tex_name) tex_link =", "/ t rgb[0] = r[0] * t_inv + r[1] *", "curshader = vert out_disp = parse_displacement_input(node.inputs[2]) curshader.write('vec3 disp = {0};'.format(out_disp))", "'' return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link)) else: global parsed", "under the License is distributed on an \"AS 
IS\" BASIS,", "1.0;'.format(parse_vector_input(inp))) if strength_input != None: strength = parse_value_input(strength_input) if strength", "if node.outputs[i] == socket: return i def node_name(s): for p", "out_col = 'mix({0}, {0} - {1}, {2})'.format(col1, col2, fac_var) elif", "0.0: out = '({0} + vec3({1}, {2}, {3}))'.format(out, location[0], location[1],", "-= 1 def parse_value_input(inp): if inp.is_linked: l = inp.links[0] if", "index = socket_index(node, socket) parent = parents.pop() # Leaving group", "elif grad == 'EASING': f = '0.0' elif grad ==", "tex['min_filter'] = 'linear' tex['mag_filter'] = 'linear' tex['mipmap_filter'] = 'no' tex['generate_mipmaps']", "if parse_surface: out_basecol = '({0} * {3} + {1} *", "#map = node.uv_map # Color parse_normal_map_color_input(node.inputs[1], node.inputs[0]) return None elif", "-4.01730887e-05, 1.44002695e+00], [-1.18134453e+03, -2.18913373e-05, 1.30656109e+00], [-5.00279505e+02, -4.59745390e-06, 1.09090465e+00] ] blackbody_table_b", "'BLACKBODY': t = float(parse_value_input(node.inputs[0])) rgb = [0,0,0] blackbody_table_r = [", "= _con vert = _vert frag = _frag geom =", "co = 'bposition' col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) scale", "= make_texture(node, tex_name) tex_link = node.name if node.arm_material_param else None", "op == 'MINIMUM': out_val = 'min({0}, {1})'.format(val1, val2) elif op", "None: curshader.write_textures += 1 to_linear = node.image != None and", "* {3})'.format(\\ vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name", "vec + '.z', curves[3].points)) elif node.type == 'COMBHSV': curshader.add_function(c_functions.str_hue_sat) h", "not has_ext: # Raw bytes, write converted .jpg to /unpacked", "= parse_value_input(node.inputs[1]) res = 'tex_magic_f({0} * {1} * 4.0)'.format(co, scale)", "curshader.write('int {0} = {1};'.format(index_var, index)) if interp == 'CONSTANT': return", "curves[2].points), fac,\\ vector_curve(name + '3a', 
vec + '.x', curves[3].points), vector_curve(name", "'(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_TRANSPARENT': if parse_opacity: out_opacity", "0.5) ? 2.0 * {1} : 0.5 / (1.0 -", "if node.node_tree.name.startswith('Armory PBR'): # Displacement if socket == node.outputs[1]: return", "location[1] != 0.0 or location[2] != 0.0: out = '({0}", "if res == None: return None curshader.write('vec3 {0} = {1};'.format(res_var,", "== 'VECT_TRANSFORM': #type = node.vector_type #conv_from = node.convert_from #conv_to =", "- {0}))'.format(col1, col2, fac_var) elif blend == 'DIVIDE': out_col =", "'COMBRGB': r = parse_value_input(node.inputs[0]) g = parse_value_input(node.inputs[1]) b = parse_value_input(node.inputs[2])", "curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0])) # Map vector return 'mix({0}[{1}],", "the License. # You may obtain a copy of the", "% {1})'.format(val1, val2) out_val = 'mod({0}, {1})'.format(val1, val2) elif op", "{3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var,", "image.name + ' - invalid file path') return None #", "out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) elif blend ==", "{0}_{1} - {0}_{2}; float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0],", "normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res)) curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res)) res", "parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'tex_noise({0} * {1})'.format(co,", "fac_var, fac)) curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix, fac_inv_var, fac_var))", ">= 3315.0): i = 4 elif(t >= 1902.0): i =", "inp.type == 'RGB': return parse_vector_input(inp) elif inp.type == 'RGBA': return", "return 'vec3({0})'.format(res_var) else: if inp.type == 'VALUE': # Unlinked reroute", "{1} * {2})'.format(col1, col2, fac_var) elif blend == 'LIGHTEN': out_col", "node.type == 'TEX_NOISE': 
curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png')", "Compute TBN matrix frag.write('vec3 texn = ({0}) * 2.0 -", "= 'mix({0}, {0} - {1}, {2})'.format(col1, col2, fac_var) elif blend", "{1})'.format(val1, val2) elif op == 'SINE': out_val = 'sin({0})'.format(val1) elif", "parse_shader_input(node.inputs[1]) if parse_surface: out_basecol = '({0} + {1})'.format(bc1, bc2) out_roughness", "TODO: Make const for i in range(0, len(elems)): curshader.write('{0}[{1}] =", "= 0.0 else: if (t >= 6365.0): i = 5", "out_val = 'float({0} % {1})'.format(val1, val2) out_val = 'mod({0}, {1})'.format(val1,", "node.type == 'OBJECT_INFO': return 'wposition' elif node.type == 'PARTICLE_INFO': if", "out_opacity, out_emission def parse_displacement_input(inp): if inp.is_linked: l = inp.links[0] if", "None def parse_vector_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type", "emission_found global particle_info global sample_bump global sample_bump_res con = _con", "parse_surface: # Occlude out_occlusion = '0.0' elif node.type == 'BSDF_REFRACTION':", "parse_value_input(node.inputs[1]) out_basecol = '({0} * {1})'.format(out_basecol, emission_strength) elif node.type ==", "out_metallic = '1.0' elif node.type == 'EMISSION': if parse_surface: #", "{2} + ({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if", "is_ascii(s): return len(s) == len(s.encode()) ## def get_rp_renderer(): return arm.utils.get_rp().rp_renderer", "({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var) def", "'distance(eye, wposition)' elif node.type == 'FRESNEL': curshader.add_function(c_functions.str_fresnel) ior = parse_value_input(node.inputs[0])", "now mat = mat_get_material() mat_users = mat_get_material_users() if mat_users !=", "file_format=fmt) arm.assets.add(converted_path) else: # Link image path to assets #", "True elif interpolation == 'Smart': # Mipmap anisotropic 
tex['min_filter'] =", "texpath = arm.utils.asset_path(filepath) texfile = arm.utils.extract_filename(filepath) tex['file'] = arm.utils.safestr(texfile) s", "parents = [] normal_parsed = False rpdat = arm.utils.get_rp() if", "global sample_bump global sample_bump_res sample_bump_res = store_var_name(node) + '_bump' #", "# Vcols only for now return 'vcolor' elif node.type ==", "* (1.0 / (finish - start)) return 'mix({0}[{1}], {0}[{1} +", "len(lays) > 1 and node.uv_map == lays[1].name: con.add_elem('tex1', 'short2norm') return", "0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_2 = {1}{2}", "= ext not in ('jpg', 'png', 'hdr', 'mp4') # Convert", "def assets_add_embedded_data(path): arm.assets.add_embedded_data(path) def mat_name(): return mat_state.material.name def mat_batch(): return", "+ ' - file extension required for image name') return", "return '0.0' elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/'", "texCoordBlend.z;') else: if mat_texture_grad(): curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy,", "s += '_texread' s = safesrc(s) if '__' in s:", "i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0])) #", "sample_bump global sample_bump_res con = _con vert = _vert frag", "== False: return if normal_parsed: return normal_parsed = True frag.write_normal", "0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02],", "to load full images on size request, cache size instead", "mix elif blend == 'SATURATION': out_col = 'mix({0}, {1}, {2})'.format(col1,", "= 0 r = blackbody_table_r[i] g = blackbody_table_g[i] b =", "* {3} + {1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var) out_specular", "images on size request, cache size instead powimage = is_pow(image.size[0])", "op == 'ABSOLUTE': out_val = 'abs({0})'.format(val1) elif op == 'MINIMUM':", 
"filepath.rsplit('.', 1) arm.assets.add(arm.utils.asset_path(s[0] + '.' + s[1].lower())) else: arm.assets.add(arm.utils.asset_path(filepath)) #", "'SUBTRACT': out_val = '({0} - {1})'.format(val1, val2) elif op ==", "if node.attribute_name == 'time': curshader.add_uniform('float time', link='_time') return 'time' else:", "Ray Depth return '0.0' elif socket == node.outputs[9]: # Transparent", "os.path.exists(unpack_path): os.makedirs(unpack_path) converted_path = os.path.join(unpack_path, tex['file']) # TODO: delete cache", "1.20753416e+00], [-1.42546105e+03, -4.01730887e-05, 1.44002695e+00], [-1.18134453e+03, -2.18913373e-05, 1.30656109e+00], [-5.00279505e+02, -4.59745390e-06, 1.09090465e+00]", "parse_normal_map_color_input(inp, strength_input=None): global normal_parsed global frag if basecol_only: return if", "TODO: Khamake converts .PNG to .jpg? Convert ext to lowercase", "'Smart': # Mipmap anisotropic tex['min_filter'] = 'anisotropic' tex['mipmap_filter'] = 'linear'", "out_val = 'atan({0}, {1})'.format(val1, val2) if node.use_clamp: return 'clamp({0}, 0.0,", "# Is Transmission Ray return '0.0' elif socket == node.outputs[7]:", "return mat_state.batch def mat_bind_texture(tex): mat_state.bind_textures.append(tex) def mat_texture_grad(): return mat_state.texture_grad def", "'bposition' scale = parse_value_input(node.inputs[4]) res = 'tex_brick_f({0} * {1})'.format(co, scale)", "# Velocity particle_info['velocity'] = True return 'p_velocity' if arm.utils.get_rp().arm_particles ==", "parse_displacement_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE':", "= [ [2.52432244e+03, -1.06185848e-03, 3.11067539e+00], [3.37763626e+03, -4.34581697e-04, 1.64843306e+00], [4.10671449e+03, -8.61949938e-05,", "'RADIAL': f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co) elif", "def parse_group_input(node, socket): index = socket_index(node, socket) parent = parents.pop()", "return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif 
socket", "mat_users = mat_get_material_users() if mat_users != None and mat in", "points[i].location[1])) # Get index fac_var = name + '_fac' curshader.write('float", "{2};'.format(ys_var, i, points[i].location[1])) # Get index fac_var = name +", "st = l.from_socket.type if st == 'RGB' or st ==", "+ b[3] # Pass constant return to_vec3([rgb[0], rgb[1], rgb[2]]) elif", "res = 'tex_magic_f({0} * {1} * 4.0)'.format(co, scale) if sample_bump:", "curshader.write_textures += 1 res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link)) curshader.write_textures", "== 'VOLUME_ABSORPTION': pass elif node.type == 'VOLUME_SCATTER': pass return out_basecol,", "/ tese for displacement global con global vert global frag", "'min({0}, {1})'.format(val1, val2) elif op == 'MAXIMUM': out_val = 'max({0},", "True return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif", "False particle_info['location'] = False particle_info['size'] = False particle_info['velocity'] = False", "interpolation = 'Closest' # TODO: Blender seems to load full", "== 'TEX_NOISE': curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D", "= False sample_bump_res = '' wrd = bpy.data.worlds['Arm'] # Surface", "# Copy non-ascii texture else: if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath)", "'VALUE': return parse_value_input(inp) def parse_shader_input(inp): if inp.is_linked: l = inp.links[0]", "elif node.type == 'TEX_COORD': #obj = node.object #instance = node.from_instance", "'BSDF_REFRACTION': # write_normal(node.inputs[3]) pass elif node.type == 'SUBSURFACE_SCATTERING': if parse_surface:", "if parse_surface: write_normal(node.inputs[19]) out_basecol = parse_vector_input(node.inputs[0]) # subsurface = parse_vector_input(node.inputs[1])", "'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 
'ATTRIBUTE': # Pass", "basecol out_basecol = parse_vector_input(node.inputs[0]) out_emission = '1.0' emission_found = True", "== 'Cubic': # Mipmap linear tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] =", "= {} tex['name'] = tex_name image = image_node.image if matname", "== 'FLOOR': out_val = 'floor({0})'.format(val1) elif op == 'CEIL': out_val", "'mposition' elif socket == node.outputs[4]: # Camera return 'vec3(0.0)' #", "else 'JPEG' arm.utils.convert_image(image, unpack_filepath, file_format=fmt) else: # Write bytes if", "tex def is_pow(num): return ((num & (num - 1)) ==", "to mix # out_col = '({0} + {2} * (2.0", "Vector in camera space return 'vVecCam' elif node.type == 'NEW_GEOMETRY':", "blend == 'DARKEN': out_col = 'min({0}, {1} * {2})'.format(col1, col2,", "uv_name)) curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name))", "parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) #", "= 'tex_magic_f({0} * {1} * 4.0)'.format(co, scale) if sample_bump: write_bump(node,", "return uname def store_var_name(node): return node_name(node.name) + '_store' def texture_store(node,", "global particle_info global sample_bump global sample_bump_res # RGB if node.type", "return 'vcolor' else: # Vector con.add_elem('tex', 'short2norm') # UVMaps only", "= parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.r'.format(col) elif socket", "curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float", "parse_vector_input(node.inputs[11]) # clearcoat = parse_vector_input(node.inputs[12]) # clearcoat_rough = parse_vector_input(node.inputs[13]) #", "else: ext = ['2', '1', '4', '3'] curshader.write('float {0}_fh1 =", "+= ' + ({0} > {1} ? 
1 : 0)'.format(fac_var,", "specific language governing permissions and # limitations under the License.", "out_col = parse_vector_input(node.inputs[0]) gamma = parse_value_input(node.inputs[1]) return 'pow({0}, vec3({1}))'.format(out_col, gamma)", "node_name(node.name) + '_cols' curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const", "out_metallic, out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0]) if parse_surface: frag.write('basecol", "+ '_cols' curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const for", "= {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3])) curshader.write('{0}_fh1 *=", "return res def parse_input(inp): if inp.type == 'SHADER': return parse_shader_input(inp)", "disp_enabled() and node.inputs[2].is_linked: parsed = {} parents = [] normal_parsed", "= '1.0' out_opacity = '1.0' out_emission = '0.0' return out_basecol,", "i = 5 elif(t >= 3315.0): i = 4 elif(t", "= '1.0' out_emission = '0.0' if node.type == 'GROUP': if", "curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link) if node.inputs[0].is_linked: uv_name = parse_vector_input(node.inputs[0]) uv_name =", "particle_info = {} particle_info['index'] = False particle_info['age'] = False particle_info['lifetime']", "= parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) scale = parse_value_input(node.inputs[3]) res =", "== 'HAIR_INFO': # Is Strand # Intercept # Thickness return", "parse_value_input(node.inputs[1]) z = parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(x, y, z)", "-2.18913373e-05, 1.30656109e+00], [-5.00279505e+02, -4.59745390e-06, 1.09090465e+00] ] blackbody_table_b = [ [0.0,", "= parse_vector_input(node.inputs[9]) # sheen = parse_vector_input(node.inputs[10]) # sheen_tint = parse_vector_input(node.inputs[11])", "os.path.exists(unpack_path): os.makedirs(unpack_path) unpack_filepath = 
os.path.join(unpack_path, tex['file']) if do_convert: if not", "elif node.type == 'TEX_NOISE': curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')", "- pow({0}, ({1} < 0.5) ? 2.0 * {1} :", "# Write bytes if size is different or file does", "res_var def glsl_type(t): if t == 'RGB' or t ==", "curshader.add_uniform(glsl_type(inp.type) + ' ' + uname) return uname def store_var_name(node):", "fac_inv_var) out_metallic = '({0} * {3} + {1} * {2})'.format(met1,", "arm.utils.get_sdk_path() def disp_enabled(): return arm.utils.disp_enabled(arm.make_state.target) def warn(text): arm.log.warn(text) def assets_add(path):", "t + r[2] rgb[1] = g[0] * t_inv + g[1]", "os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size: with open(unpack_filepath, 'wb') as f:", "= node_name(node.name) # mapping.curves[0].points[0].handle_type # bezier curve return '(vec3({0}, {1},", "ar[0] + '(' if ',' in ar[1]: ar2 = ar[1].split(',',", "node.type == 'VERTEX_COLOR': con.add_elem('col', 'short4norm') # Vcols only for now", "bounds return 'bposition' elif socket == node.outputs[1]: # Normal return", "strength_input=None): global normal_parsed global frag if basecol_only: return if inp.is_linked", "* (2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var) if node.use_clamp:", "== 'INTENSITY': res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale) else: #", "ext = ['2', '1', '4', '3'] curshader.write('float {0}_fh1 = {0}_{1}", "# float f = (pos - start) * (1.0 /", "import os import arm.assets import arm.utils import arm.make_state import arm.log", "== node.outputs[6]: # Reflection return 'vec3(0.0)' elif node.type == 'UVMAP':", "= parse_value_input(node.inputs[2]) return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v) elif node.type == 'COMBRGB':", "0.5)'.format(occ1, occ2) out_specular = '({0} * 0.5 + {1} *", "os.path.join(unpack_path, tex['file']) if do_convert: if not 
os.path.isfile(unpack_filepath): fmt = 'PNG'", "else: if (t >= 6365.0): i = 5 elif(t >=", "# Is Shadow Ray return '0.0' elif socket == node.outputs[2]:", "False particle_info['velocity'] = False particle_info['angular_velocity'] = False sample_bump = False", "in range(1, len(points)): index += ' + ({0} > {1}", "== 'DIAGONAL': f = '({0}.x + {0}.y) * 0.5'.format(co) elif", "'wposition' elif node.type == 'PARTICLE_INFO': if socket == node.outputs[3]: #", "grad == 'QUADRATIC_SPHERE': f = '0.0' elif grad == 'SPHERICAL':", "- {1})'.format(vec1, vec2) elif op == 'AVERAGE': return '(({0} +", "'0.0' elif node.type == 'BSDF_GLOSSY': if parse_surface: write_normal(node.inputs[2]) out_basecol =", "Texture directly') parse_normal_map_color_input(node.inputs[5]) # Emission if node.inputs[6].is_linked or node.inputs[6].default_value !=", "if node.use_clamp: return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col) else: return out_col elif", "for i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1]))", "None or not is_ascii(texfile): # Extract packed data / copy", "is_pow(image.size[0]) and is_pow(image.size[1]) if interpolation == 'Cubic': # Mipmap linear", "*= ({1}) * 3.0;'.format(sample_bump_res, strength)) curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0,", "node instead # if node.use_min: # out = 'max({0}, vec3({1},", "global sample_bump_res sample_bump_res = store_var_name(node) + '_bump' # Testing.. 
get", "elif node.type == 'INVERT': fac = parse_value_input(node.inputs[0]) out_col = parse_vector_input(node.inputs[1])", "else: if mat_batch() and inp.is_uniform: return to_uniform(inp) else: return to_vec3(inp.default_value)", "out_roughness = parse_value_input(node.inputs[7]) # aniso = parse_vector_input(node.inputs[8]) # aniso_rot =", "None return res_var def glsl_type(t): if t == 'RGB' or", "parse_value_input(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type", "return 'linearize(gl_FragCoord.z, cameraProj)' # View Distance else: curshader.add_uniform('vec3 eye', link='_cameraPosition')", "node.outputs[4]: # Size particle_info['size'] = True return '1.0' elif node.type", "'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic_f({0} * {1} *", "0) {tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;') curshader.write(f'if (texCoordBlend.z >", "'{0}.rgb'.format(store_var_name(node)) tex_name = node_name(node.name) tex = make_texture(node, tex_name) tex_link =", "vector_curve(name, fac, points): # Write Ys array ys_var = name", "emission_found = False particle_info = None # Particle info export", "out_metallic = '1.0' elif node.type == 'VOLUME_ABSORPTION': pass elif node.type", "return len(s) == len(s.encode()) ## def get_rp_renderer(): return arm.utils.get_rp().rp_renderer def", "out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0]) if", "parse_value_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE':", "'/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') curshader.add_function(c_functions.str_tex_noise) if node.inputs[0].is_linked:", "{1} * {2})'.format(spec1, spec2, fac_var, fac_inv_var) out_emission = '({0} *", "# bezier curve return '(vec3({0}, {1}, {2}) * 
{3})'.format(\\ vector_curve(name", "'(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6})) * {3})'.format(\\ vector_curve(name", "'n' elif socket == node.outputs[2]: # UV con.add_elem('tex', 'short2norm') return", "False nor = parse_vector_input(node.inputs[3]) if sample_bump_res != '': if node.invert:", "None: return None curshader.write('vec3 {0} = {1};'.format(res_var, res)) elif st", "{1}, {2})'.format(col1, col2, fac_var) elif blend == 'SUBTRACT': out_col =", "not is_parsed(res_var): parsed[res_var] = True st = l.from_socket.type if st", "'objectInfoMaterialIndex' elif socket == node.outputs[4]: # Random curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom')", "safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type) + ' ' + uname) return uname def", "'{0}.x'.format(co) elif grad == 'QUADRATIC': f = '0.0' elif grad", "node_name(node.name) + '_' + safesrc(socket.name) + '_res' def write_result(l): global", "return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked:", "socket == node.outputs[7]: # Ray Length return '0.0' elif socket", "{0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_HAIR': pass elif node.type == 'HOLDOUT':", "node.use_clamp: return 'clamp({0}, 0.0, 1.0)'.format(out_val) else: return out_val elif node.type", "is_parsed(s): global parsed return s in parsed def res_var_name(node, socket):", "displacement global con global vert global frag global geom global", "View Z Depth if socket == node.outputs[1]: curshader.add_include('std/math.glsl') curshader.add_uniform('vec2 cameraProj',", "image.packed_file.size: with open(unpack_filepath, 'wb') as f: f.write(image.packed_file.data) # Copy non-ascii", "fac_var, fac_inv_var) out_metallic = '({0} * {3} + {1} *", "== 'NORMALIZE': return 'normalize({0})'.format(vec1) elif node.type == 'DISPLACEMENT': height =", "texfilter = rpdat.arm_texture_filter if texfilter == 'Anisotropic': interpolation = 'Smart'", "elif node.type == 
'FRESNEL': curshader.add_function(c_functions.str_fresnel) ior = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked:", "'.png', '.hdr')) if not has_ext: # Raw bytes, write converted", "node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0: out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found =", "= None # Particle info export def parse(nodes, con, vert,", "def parse_shader_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type ==", "frag.write('texn.y = -texn.y;') frag.add_include('std/normals.glsl') frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);')", "node.operation if op == 'ADD': out_val = '({0} + {1})'.format(val1,", "= node_name(node.name) # mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0}, {1}, {2}) * vec3({4},", "return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v) elif node.type == 'COMBRGB': r =", "mat = mat_get_material() mat_users = mat_get_material_users() if mat_users != None", "len(s.encode()) ## def get_rp_renderer(): return arm.utils.get_rp().rp_renderer def get_arm_export_tangents(): return bpy.data.worlds['Arm'].arm_export_tangents", "1449.0): i = 2 elif(t >= 1167.0): i = 1", "# if image_format != 'RGBA32': # tex['format'] = image_format interpolation", "tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True elif interpolation == 'Smart':", "is_parsed(store_var_name(node)): return '{0}.rgb'.format(store_var_name(node)) tex_name = node_name(node.name) tex = make_texture(node, tex_name)", "Texture node instead # if node.use_min: # out = 'max({0},", "v[2]) def node_by_type(nodes, ntype): for n in nodes: if n.type", "return '{0}.a'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co", "rgb[1] = 0.994478524 rgb[2] = 1.56626022 elif (t < 965.0):", "node.outputs[2]: # Object Index curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex') return 
'objectInfoIndex' elif", "res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res) return", "fac_var) elif blend == 'OVERLAY': out_col = 'mix({0}, {1}, {2})'.format(col1,", "tex['file'] = '' return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link)) else:", "matname=None): tex = {} tex['name'] = tex_name image = image_node.image", "== 'CONSTANT': return '{0}[{1}]'.format(cols_var, index_var) else: # Linear # Write", "scale) else: # CELLS res = 'tex_voronoi({0} * {1}).r'.format(co, scale)", "arm.material.cycles_functions as c_functions import shutil emission_found = False particle_info =", "col2, fac_var) elif blend == 'DIVIDE': out_col = '(vec3((1.0 -", "if node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name))", "{1}, {2}, {3})'.format(co, col1, col2, scale) if sample_bump: write_bump(node, res)", "frag.write('specular = {0};'.format(out_specular)) if '_Emission' in wrd.world_defs: frag.write('emission = {0};'.format(out_emission))", "node.type == 'NORMAL': if socket == node.outputs[0]: return to_vec3(node.outputs[0].default_value) elif", "Color con.add_elem('col', 'short4norm') # Vcols only for now return 'vcolor'", "= image_format interpolation = image_node.interpolation rpdat = arm.utils.get_rp() texfilter =", "# Pink color for missing texture parsed[tex_store] = True curshader.write_textures", "+ '_ys' curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make const for", "in ar[1]: ar2 = ar[1].split(',', 1) co = ar2[0] post", "* {4}, {1}, {2}, {3})'.format(co, col1, col2, col3, scale) if", "particle_info['size'] = True return '1.0' elif node.type == 'VALUE': if", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "node.type == 'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)): return '{0}.rgb'.format(store_var_name(node))", "'png' else 'JPEG' arm.utils.convert_image(image, 
converted_path, file_format=fmt) arm.assets.add(converted_path) else: # Link", "(1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var,", "vec2) elif op == 'CROSS_PRODUCT': return 'cross({0}, {1})'.format(vec1, vec2) elif", "res = parse_vector(l.from_node, l.from_socket) if res == None: return None", "if _parse_displacement and disp_enabled() and node.inputs[2].is_linked: parsed = {} parents", "'BUMP': # Interpolation strength strength = parse_value_input(node.inputs[0]) # Height multiplier", "Pass time till drivers are implemented if node.attribute_name == 'time':", "elif socket == node.outputs[2]: # Is Diffuse Ray return '1.0'", "parse_vector_input(node.inputs[0]) # Occlusion out_occlusion = parse_value_input(node.inputs[2]) # Roughness out_roughness =", "/ PI2 + 0.5'.format(co) elif grad == 'QUADRATIC_SPHERE': f =", "None curshader.write('vec3 {0} = {1};'.format(res_var, res)) elif st == 'VALUE':", "# write_normal(node.inputs[3]) pass elif node.type == 'SUBSURFACE_SCATTERING': if parse_surface: write_normal(node.inputs[4])", "range(0, len(elems)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position)) # Mix color", "link='_time') return 'time' else: return '0.0' elif node.type == 'CAMERA':", "already parsed, return elif l.from_node.type == 'NORMAL_MAP': return None return", "'MAXIMUM': out_val = 'max({0}, {1})'.format(val1, val2) elif op == 'LESS_THAN':", "tese, parse_surface, parse_opacity, parse_displacement, basecol_only) def parse_output(node, _con, _vert, _frag,", "= _vert frag = _frag geom = _geom tesc =", "return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'NORMAL_MAP': if curshader", "'vec3(0.0)' elif node.type == 'UVMAP': #instance = node.from_instance con.add_elem('tex', 'short2norm')", "f = '({0}.x + {0}.y) * 0.5'.format(co) elif grad ==", "Depth return '0.0' elif node.type == 'OBJECT_INFO': if socket ==", "math.cos(a), math.sin(a)) # if node.rotation[0] != 0.0: # a =", 
"st == 'RGB' or st == 'RGBA' or st ==", "tex['mag_filter'] = 'linear' tex['mipmap_filter'] = 'no' tex['generate_mipmaps'] = False return", "scale = parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) # distortion =", "frag.write_normal += 1 if not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute", "{0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3])) curshader.write('{0}_fh1 *= ({1}) * 3.0;", "fac_var = node_name(node.name) + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac))", "{1};'.format(res_var, res)) # Normal map already parsed, return elif l.from_node.type", "* {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[0] != 0.0:", "out_val = 'floor({0} + 0.5)'.format(val1) elif op == 'FLOOR': out_val", "parse_vector_input(inp) elif inp.type == 'VECTOR': return parse_vector_input(inp) elif inp.type ==", "({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var) elif", "function parts.. ar = res.split('(', 1) pre = ar[0] +", "if mat_users != None and mat in mat_users: mat_user =", "+ safesrc(socket.name) + '_res' def write_result(l): global parsed res_var =", "+ ({0} > {1} ? 
1 : 0)'.format(fac_var, points[i].location[0]) #", "False particle_info['size'] = False particle_info['velocity'] = False particle_info['angular_velocity'] = False", "TBN = cotangentFrame(n, -vVec, texCoord);') frag.write('n = TBN * normalize(texn);')", "vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' return 'fresnel({0}, {1})'.format(ior, dotnv) elif", "'({0} + vec3({1}, {2}, {3}))'.format(out, location[0], location[1], location[2]) # use", "Revert to mix elif blend == 'VALUE': out_col = 'mix({0},", "vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_3 =", "occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1]) if parse_surface: out_basecol =", "parent.inputs[index] res = parse_input(inp) parents.append(parent) # Return to group return", "start) * (1.0 / (finish - start)) return 'mix({0}[{1}], {0}[{1}", "+ '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) index = '0'", "{0};'.format(out_roughness)) frag.write('metallic = {0};'.format(out_metallic)) frag.write('occlusion = {0};'.format(out_occlusion)) frag.write('specular = {0};'.format(out_specular))", "Lifetime particle_info['lifetime'] = True return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On'", "out = '({0} + vec3({1}, {2}, {3}))'.format(out, location[0], location[1], location[2])", "{1};'.format(index_var, index)) if interp == 'CONSTANT': return '{0}[{1}]'.format(cols_var, index_var) else:", "== 'TEX_SKY': # Pass through return to_vec3([0.0, 0.0, 0.0]) elif", "elif texfilter == 'Linear': interpolation = 'Linear' elif texfilter ==", "vec2 = parse_vector_input(node.inputs[1]) op = node.operation if op == 'ADD':", "= parse_vector_input(node.inputs[11]) # clearcoat = parse_vector_input(node.inputs[12]) # clearcoat_rough = parse_vector_input(node.inputs[13])", "# Emission if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0: out_emission =", "elif node.type == 'TEX_VORONOI': 
curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')", "= 'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale) if sample_bump: write_bump(node,", "'SCREEN': out_col = '(vec3(1.0) - (vec3(1.0 - {2}) + {2}", "= vec4(0.0, 0.0, 0.0, 0.0);') curshader.write(f'if (texCoordBlend.x > 0) {tex_store}", "quadratic for now return '1.0' elif node.type == 'NORMAL': nor", "out_occlusion, out_specular, out_opacity, out_emission def parse_displacement_input(inp): if inp.is_linked: l =", "elif blend == 'COLOR': out_col = 'mix({0}, {1}, {2})'.format(col1, col2,", "else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'vec3(tex_wave_f({0}", "socket: return i def node_name(s): for p in parents: s", "software # distributed under the License is distributed on an", "= 'bposition' scale = parse_value_input(node.inputs[1]) res = 'vec3(tex_wave_f({0} * {1}))'.format(co,", "quadratic # Shaders default to quadratic for now return '1.0'", "= arm.utils.safestr(texfile) s = tex['file'].rsplit('.', 1) if len(s) == 1:", "{1})'.format(val1, val2) elif op == 'POWER': out_val = 'pow({0}, {1})'.format(val1,", "out_val = 'round({0})'.format(val1) out_val = 'floor({0} + 0.5)'.format(val1) elif op", "elif op == 'ROUND': # out_val = 'round({0})'.format(val1) out_val =", "Is Camera Ray return '1.0' elif socket == node.outputs[1]: #", "co = 'bposition' scale = parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2])", "grad == 'EASING': f = '0.0' elif grad == 'DIAGONAL':", "# Volume # parse_volume_input(node.inputs[1]) # Displacement if _parse_displacement and disp_enabled()", "= mat_get_material_users() if mat_users != None and mat in mat_users:", "curshader.shader_type == 'frag' else 'wnormal' elif socket == node.outputs[2]: #", "else 'wnormal' elif socket == node.outputs[4]: # Incoming return 'vVec'", "'HUE_SAT': curshader.add_function(c_functions.str_hue_sat) hue = parse_value_input(node.inputs[0]) sat = 
parse_value_input(node.inputs[1]) val =", "'RGB': return parse_vector_input(inp) elif inp.type == 'RGBA': return parse_vector_input(inp) elif", "out_val = 'max({0}, {1})'.format(val1, val2) elif op == 'LESS_THAN': out_val", "Make const for i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(facs_var,", "fac_var, fac_inv_var) out_specular = '({0} * {3} + {1} *", "ext to lowercase on windows if arm.utils.get_os() == 'win': s", "node.type == 'LIGHT_FALLOFF': # Constant, linear, quadratic # Shaders default", "9.36026367e-06, 3.98995841e-01] ] blackbody_table_g = [ [-7.50343014e+02, 3.15679613e-04, 4.73464526e-01], [-1.00402363e+03,", "node_name(node.name) + '_store' def texture_store(node, tex, tex_name, to_linear=False, tex_link=None): global", "'0.0' elif socket == node.outputs[2]: # Is Diffuse Ray return", "occ2, fac_var, fac_inv_var) out_specular = '({0} * {3} + {1}", "== 'TANGENT': return 'wtangent' elif node.type == 'TEX_COORD': #obj =", "subsurface_color = parse_vector_input(node.inputs[3]) out_metallic = parse_value_input(node.inputs[4]) out_specular = parse_value_input(node.inputs[5]) #", "None: curshader.write_textures += 1 res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link))", "'1.0' emission_found = True emission_strength = parse_value_input(node.inputs[1]) out_basecol = '({0}", "'(({0} + {1}) / 2.0)'.format(vec1, vec2) elif op == 'DOT_PRODUCT':", "return None # Reference image name texpath = arm.utils.asset_path(filepath) texfile", "{3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var) def write_normal(inp): if inp.is_linked and", "{uv_name}2 = vec2(0.0);') # Temp curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0,", "elif socket == node.outputs[1]: # Is Shadow Ray return '0.0'", "if socket == node.outputs[0]: # Position return 'wposition' elif socket", "+ '.z', curves[3].points)) elif node.type == 'COMBHSV': curshader.add_function(c_functions.str_hue_sat) h =", "texture else: if not 
os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath): shutil.copy(texpath,", "node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP': warn(mat_name() + ' - Do", "Link image path to assets # TODO: Khamake converts .PNG", "col = parse_vector_input(node.inputs[4]) return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue,", "= parse_vector_input(node.inputs[12]) # clearcoat_rough = parse_vector_input(node.inputs[13]) # ior = parse_vector_input(node.inputs[14])", "= 'cos({0})'.format(val1) elif op == 'TANGENT': out_val = 'tan({0})'.format(val1) elif", "else None if tex != None: curshader.write_textures += 1 res", "return n def socket_index(node, socket): for i in range(0, len(node.outputs)):", "parents.pop() return out_group def parse_group_input(node, socket): index = socket_index(node, socket)", "* vec3({4}, {5}, {6})) * {3})'.format(\\ vector_curve(name + '0', vec", "elif socket == node.outputs[1]: # TODO: is parse_value path preferred?", "arm.utils import arm.make_state import arm.log import arm.material.mat_state as mat_state import", "'SQRT': out_val = 'sqrt({0})'.format(val1) elif op == 'ABSOLUTE': out_val =", "parsed tex_store = store_var_name(node) # Pink color for missing texture", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else:", "= 'no' tex['generate_mipmaps'] = False return tex def is_pow(num): return", "node.image != None and node.image.colorspace_settings.name == 'sRGB' res = '{0}.rgb'.format(texture_store(node,", "({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] -", "parse_value_input(node.inputs[1]) contr = parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright,", "range(1, len(points)): index += ' + ({0} > {1} ?", "tex['mipmap_filter'] = 'linear' 
tex['generate_mipmaps'] = True elif interpolation == 'Closest':", "new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, unpack_filepath, file_format=fmt) else: #", "image is None: return None # Get filepath filepath =", "out_roughness = '0.0' out_metallic = '0.0' out_occlusion = '1.0' out_specular", "' + matname + '/' + image.name + ' -", "= '({0}.x + {0}.y) * 0.5'.format(co) elif grad == 'RADIAL':", "fac_var) elif blend == 'ADD': out_col = 'mix({0}, {0} +", "{1})'.format(val1, val2) elif op == 'LOGARITHM': out_val = 'log({0})'.format(val1) elif", "elif node.type == 'COMBXYZ': x = parse_value_input(node.inputs[0]) y = parse_value_input(node.inputs[1])", "* t_inv + g[1] * t + g[2] rgb[2] =", "UVMaps only for now mat = mat_get_material() mat_users = mat_get_material_users()", "t == 'VECTOR': return 'vec3' else: return 'float' def to_uniform(inp):", "vec1 = parse_vector_input(node.inputs[0]) vec2 = parse_vector_input(node.inputs[1]) op = node.operation if", "float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3]))", "'tan({0})'.format(val1) elif op == 'ARCSINE': out_val = 'asin({0})'.format(val1) elif op", "'ADD': out_col = 'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac_var)", "dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' return 'fresnel({0},", "'bposition' grad = node.gradient_type if grad == 'LINEAR': f =", "'{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link)) curshader.write_textures -= 1 return res elif", "1 and node.attribute_name == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0", "node.rotation[0] != 0.0: # a = node.rotation[0] # out =", "i def node_name(s): for p in parents: s = p.name", "'OBJECT_INFO': if socket == node.outputs[2]: # Object Index curshader.add_uniform('float objectInfoIndex',", ">= 1167.0): i = 1 else: i = 0 r", "out = 'vec3({0}.y * {1} - {0}.z * {2}, {0}.y", "return nn else: return 
to_vec3(socket.default_value) elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick)", "ntype): for n in nodes: if n.type == ntype: return", "else 'JPEG' arm.utils.convert_image(image, converted_path, file_format=fmt) arm.assets.add(converted_path) else: # Link image", "0.0: # ZYX rotation, Z axis for now.. a =", "'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif node.type == 'RGB': if", "fetched if is_parsed(store_var_name(node)): return '{0}.a'.format(store_var_name(node)) tex_name = safesrc(node.name) tex =", "blend == 'SOFT_LIGHT': out_col = '((1.0 - {2}) * {0}", "# Roughness out_roughness = parse_value_input(node.inputs[3]) # Metallic out_metallic = parse_value_input(node.inputs[4])", "contr) elif node.type == 'GAMMA': out_col = parse_vector_input(node.inputs[0]) gamma =", "== 'BRIGHTCONTRAST': out_col = parse_vector_input(node.inputs[0]) bright = parse_value_input(node.inputs[1]) contr =", "nn = 'param_' + node_name(node.name) curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name)) return nn", "# Single channel out_occlusion = parse_vector_input(node.inputs[0]) + '.r' elif node.type", "= node_name(node.name) + '_fac' fac_inv_var = node_name(node.name) + '_fac_inv' curshader.write('{0}float", "return res elif node.type == 'TEX_MUSGRAVE': # Fall back to", "= s.replace('_', '_x') return s ## def make_texture(image_node, tex_name, matname=None):", "== 'SEPRGB': col = parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return", "out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_displacement_input(inp): if inp.is_linked:", "== node.outputs[3]: # Material Index curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex') return 'objectInfoMaterialIndex'", "image = image_node.image if matname is None: matname = mat_state.material.name", "math.sin(a)) # if node.rotation[1] != 0.0: # a = node.rotation[1]", 
"curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr) elif node.type ==", "!= '': if node.invert: ext = ['1', '2', '3', '4']", "elif blend == 'SATURATION': out_col = 'mix({0}, {1}, {2})'.format(col1, col2,", "'NORMAL_MAP': warn(mat_name() + ' - Do not use Normal Map", "frag out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0])", "bc2) out_roughness = '({0} * 0.5 + {1} * 0.5)'.format(rough1,", "parse_input(inp) parents.pop() return out_group def parse_group_input(node, socket): index = socket_index(node,", "TODO: Make const for i in range(0, len(points)): curshader.write('{0}[{1}] =", "_parse_opacity basecol_only = _basecol_only emission_found = False particle_info = {}", "node.image.colorspace_settings.name == 'sRGB' res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link))", "'short2norm') return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif socket ==", "Position return 'wposition' elif socket == node.outputs[1]: # Normal return", "'short4norm') # Vcols only for now return 'vcolor' elif node.type", "* {2})'.format(occ1, occ2, fac_var, fac_inv_var) out_specular = '({0} * {3}", "'TEX_ENVIRONMENT': # Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type", "range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1])) # Get index", "mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6})) *", "powimage = is_pow(image.size[0]) and is_pow(image.size[1]) if interpolation == 'Cubic': #", "uname = safesrc(inp.node.name) + safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type) + ' ' +", "'0.0' elif socket == node.outputs[7]: # Ray Length return '0.0'", "parse_vector_input(node.inputs[1]) else: #space = node.space #map = node.uv_map # Color", "fac_var, facs_var) elif node.type == 'CURVE_VEC': # Vector Curves 
fac", "st == 'RGBA' or st == 'VECTOR': res = parse_vector(l.from_node,", "_basecol_only emission_found = False particle_info = {} particle_info['index'] = False", "= 'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac_var) elif blend", "'1.0' elif node.type == 'NORMAL': nor = parse_vector_input(node.inputs[0]) return 'dot({0},", "== 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2 = parse_vector_input(node.inputs[1]) op =", "else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic_f({0}", "node.type == 'SEPRGB': col = parse_vector_input(node.inputs[0]) if socket == node.outputs[0]:", "is_pow(num): return ((num & (num - 1)) == 0) and", "points): # Write Ys array ys_var = name + '_ys'", "node.type == 'PARTICLE_INFO': if socket == node.outputs[0]: # Index particle_info['index']", "== node.outputs[1]: # Normal return 'n' elif socket == node.outputs[2]:", "index_var) else: # Linear # Write facs array facs_var =", "parse_vector_input(node.inputs[21]) if parse_opacity: if len(node.inputs) > 20: out_opacity = parse_value_input(node.inputs[18])", "image.name + ' - file not found(' + filepath +", "warn(mat_name() + ' - Do not use Normal Map node", "tex_name, uv_name)) sample_bump = False if to_linear: curshader.write('{0}.rgb = pow({0}.rgb,", "vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z',", "if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type ==", "{1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float", "+ ' ' + uname) return uname def store_var_name(node): return", "{1}, {2})'.format(x, y, z) elif node.type == 'VECT_MATH': vec1 =", "# clearcoat = parse_vector_input(node.inputs[12]) # clearcoat_rough = parse_vector_input(node.inputs[13]) # ior", "'pow({0}, vec3({1}))'.format(out_col, gamma) elif node.type == 'HUE_SAT': curshader.add_function(c_functions.str_hue_sat) hue =", "t = float(parse_value_input(node.inputs[0])) 
rgb = [0,0,0] blackbody_table_r = [ [2.52432244e+03,", "index index_var = name + '_i' curshader.write('int {0} = {1};'.format(index_var,", "op == 'FLOOR': out_val = 'floor({0})'.format(val1) elif op == 'CEIL':", "'.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac,\\", "None ext = s[1].lower() do_convert = ext not in ('jpg',", "ZYX rotation, Z axis for now.. a = rotation[2] #", "= parse_vector_input(node.inputs[1]) return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac) elif", "fac_var) elif blend == 'LIGHTEN': out_col = 'max({0}, {1} *", "grad == 'DIAGONAL': f = '({0}.x + {0}.y) * 0.5'.format(co)", "'floor({0} + 0.5)'.format(val1) elif op == 'FLOOR': out_val = 'floor({0})'.format(val1)", "for i in range(1, len(points)): index += ' + ({0}", "vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2])) # Get index", "Velocity particle_info['velocity'] = True return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On'", "'short2norm') mat = mat_get_material() mat_users = mat_get_material_users() if mat_users !=", "parent = parents.pop() # Leaving group inp = parent.inputs[index] res", "parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type == 'EMISSION': if parse_surface:", "elif node.type == 'SEPRGB': col = parse_vector_input(node.inputs[0]) if socket ==", "fac_var) elif blend == 'DIVIDE': out_col = '(vec3((1.0 - {2})", "shutil.copy(texpath, unpack_filepath) arm.assets.add(unpack_filepath) else: if not os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material ' +", "node.type == 'PARTICLE_INFO': if socket == node.outputs[3]: # Location particle_info['location']", "'({0} + {1})'.format(vec1, vec2) elif op == 'SUBTRACT': return '({0}", "if parse_opacity: out_opacity = '({0} * 0.5 + {1} *", "parse_vector_input(node.inputs[4]) return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat, val,", "r[0] * t_inv + r[1] * t + r[2] rgb[1]", "grad == 
'LINEAR': f = '{0}.x'.format(co) elif grad == 'QUADRATIC':", "bc2, fac_var, fac_inv_var) out_roughness = '({0} * {3} + {1}", "node.projection == 'BOX' if triplanar: curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2", "vector_curve(name + '3a', vec + '.x', curves[3].points), vector_curve(name + '3b',", "licensed as # Copyright 2011-2013 Blender Foundation # # Licensed", "distortion = parse_value_input(node.inputs[3]) res = 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co,", "= parse_vector_input(inp) if normal_res != None: curshader.write('n = {0};'.format(normal_res)) def", "rpdat = arm.utils.get_rp() if rpdat.arm_rp_displacement == 'Tessellation' and tese !=", "Vcols only for now return 'vcolor' elif node.type == 'ATTRIBUTE':", "+ '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points),", "('jpg', 'png', 'hdr', 'mp4') # Convert image if do_convert: new_ext", "a = rotation[2] # x * cos(theta) - y *", "Revert to mix elif blend == 'COLOR': out_col = 'mix({0},", "instead # if node.use_min: # out = 'max({0}, vec3({1}, {2},", "= {0};'.format(normal_res)) def is_parsed(s): global parsed return s in parsed", "'AVERAGE': return '(({0} + {1}) / 2.0)'.format(vec1, vec2) elif op", "ar[1][:-1] post = ')' curshader.write('float {0}_1 = {1}{2} + vec3(-{4},", "fac_var) if node.use_clamp: return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col) else: return out_col", "nn = 'param_' + node_name(node.name) curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name)) return nn", "return mat_state.texture_grad def mat_get_material(): return mat_state.material def mat_get_material_users(): return mat_state.mat_users", "to_vec3([0.0, 0.0, 0.0]) else: if mat_batch() and inp.is_uniform: return to_uniform(inp)", "# clearcoar_normal = parse_vector_input(node.inputs[20]) # tangent = parse_vector_input(node.inputs[21]) if parse_opacity:", "normal_res = parse_vector_input(inp) if normal_res != None: curshader.write('n = {0};'.format(normal_res))", 
"= parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv", "Index curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex') return 'objectInfoMaterialIndex' elif socket == node.outputs[4]:", "n in nodes: if n.type == ntype: return n def", "# clearcoat_rough = parse_vector_input(node.inputs[13]) # ior = parse_vector_input(node.inputs[14]) # transmission", "== 'AMBIENT_OCCLUSION': if parse_surface: # Single channel out_occlusion = parse_vector_input(node.inputs[0])", "False particle_info = None # Particle info export def parse(nodes,", "i = 3 elif(t >= 1449.0): i = 2 elif(t", "'LOGARITHM': out_val = 'log({0})'.format(val1) elif op == 'SQRT': out_val =", "!= 1.0 or scale[1] != 1.0 or scale[2] != 1.0:", "== 0) and num != 0 def is_ascii(s): return len(s)", "frag = _frag geom = _geom tesc = _tesc tese", "output_node = node_by_type(nodes, 'OUTPUT_MATERIAL') if output_node != None: parse_output(output_node, con,", "= parse_value_input(node.inputs[3]) res = 'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2,", "= parse_value_input(node.inputs[0]) s = parse_value_input(node.inputs[1]) v = parse_value_input(node.inputs[2]) return 'hsv_to_rgb(vec3({0},", "== 'NORMAL_MAP': return None return res_var def glsl_type(t): if t", "col2, scale) if sample_bump: write_bump(node, res) return res elif node.type", "res = 'tex_voronoi({0} * {1}).r'.format(co, scale) if sample_bump: write_bump(node, res)", "blend == 'DODGE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)", "= 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale) else: # CELLS res =", "socket == node.outputs[6]: # Reflection return 'vec3(0.0)' elif node.type ==", "+ '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points),", "{0}) * {1} * {0} + {0} * (vec3(1.0) -", "node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + 
'/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png')", "if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath): shutil.copy(texpath, unpack_filepath) arm.assets.add(unpack_filepath)", "+ ')') return None if do_convert: unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled',", "write_normal(node.inputs[3]) out_roughness = parse_value_input(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 -", "def res_var_name(node, socket): return node_name(node.name) + '_' + safesrc(socket.name) +", "parse_shader(node, socket): global emission_found out_basecol = 'vec3(0.8)' out_roughness = '0.0'", "emi2 = parse_shader_input(node.inputs[1]) if parse_surface: out_basecol = '({0} + {1})'.format(bc1,", "spec2, fac_var, fac_inv_var) out_emission = '({0} * {3} + {1}", "= {0};'.format(out_basecol)) frag.write('roughness = {0};'.format(out_roughness)) frag.write('metallic = {0};'.format(out_metallic)) frag.write('occlusion =", "== 'BSDF_ANISOTROPIC': if parse_surface: write_normal(node.inputs[4]) # Revert to glossy out_basecol", "res == None: return None curshader.write('vec3 {0} = {1};'.format(res_var, res))", "+ vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_4", "= 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' return 'fresnel({0}, {1})'.format(ior,", "{2};'.format(facs_var, i, points[i].location[0])) # Map vector return 'mix({0}[{1}], {0}[{1} +", "*= {0};'.format(strength)) frag.write('n = normalize(TBN * n);') con.add_elem('tang', 'short4norm') frag.write_normal", "{4}, {1}, {2}, {3})'.format(co, col1, col2, col3, scale) if sample_bump:", "n)'.format(sample_bump_res) sample_bump_res = '' else: res = 'n' return res", "node.outputs[5]: # Velocity particle_info['velocity'] = True return 'p_velocity' if arm.utils.get_rp().arm_particles", "else: return None def parse_vector_input(inp): if inp.is_linked: l = 
inp.links[0]", "= arm.utils.asset_path(filepath) texfile = arm.utils.extract_filename(filepath) tex['file'] = arm.utils.safestr(texfile) s =", "Object return 'mposition' elif socket == node.outputs[4]: # Camera return", "'4', '3'] curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2}; float {0}_fh2", "== 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co", "== 'GROUP': if node.node_tree.name.startswith('Armory PBR'): if parse_surface: # Base color", "out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_displacement_input(inp): if", "bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0]) bc2,", "= '({0} * {3} + {1} * {2})'.format(rough1, rough2, fac_var,", "'./' + image.name has_ext = filepath.endswith(('.jpg', '.png', '.hdr')) if not", "out = 'vec3({0}.x * {1} - {0}.z * {2}, {0}.x", "{1}))'.format(col1, col2, fac_var) elif blend == 'DIFFERENCE': out_col = 'mix({0},", "True elif interpolation == 'Closest': tex['min_filter'] = 'point' tex['mag_filter'] =", "'png', 'hdr', 'mp4') # Convert image if do_convert: new_ext =", "[ [2.52432244e+03, -1.06185848e-03, 3.11067539e+00], [3.37763626e+03, -4.34581697e-04, 1.64843306e+00], [4.10671449e+03, -8.61949938e-05, 6.41423749e-01],", "def make_texture(image_node, tex_name, matname=None): tex = {} tex['name'] = tex_name", "particle_info['location'] = False particle_info['size'] = False particle_info['velocity'] = False particle_info['angular_velocity']", "node.type == 'TEX_COORD': #obj = node.object #instance = node.from_instance if", "[-7.50343014e+02, 3.15679613e-04, 4.73464526e-01], [-1.00402363e+03, 1.29189794e-04, 9.08181524e-01], [-1.22075471e+03, 2.56245413e-05, 1.20753416e+00], [-1.42546105e+03,", "= 'float({0} > {1})'.format(val1, val2) elif op == 'ROUND': #", "{0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3])) 
curshader.write('{0}_fh1 *= ({1})", "'short2norm') curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link) if node.inputs[0].is_linked: uv_name = parse_vector_input(node.inputs[0]) uv_name", "if not os.path.exists(unpack_path): os.makedirs(unpack_path) converted_path = os.path.join(unpack_path, tex['file']) # TODO:", "rpdat.arm_rp_displacement == 'Tessellation' and tese != None: curshader = tese", "'frag' else 'wnormal' elif socket == node.outputs[2]: # Tangent return", "# if node.use_min: # out = 'max({0}, vec3({1}, {2}, {3}))'.format(out,", "{0} + {0} * (vec3(1.0) - (vec3(1.0) - {1}) *", "return '0.0' elif node.type == 'SEPRGB': col = parse_vector_input(node.inputs[0]) if", "link='_cameraPlaneProj') return 'linearize(gl_FragCoord.z, cameraProj)' # View Distance else: curshader.add_uniform('vec3 eye',", "a = node.rotation[1] # out = 'vec3({0}.x * {1} -", "if parse_surface: # Multiply basecol out_basecol = parse_vector_input(node.inputs[0]) out_emission =", "if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale", "rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1]) if parse_surface:", "val2) elif op == 'SUBTRACT': out_val = '({0} - {1})'.format(val1,", "'atan({0}, {1})'.format(val1, val2) if node.use_clamp: return 'clamp({0}, 0.0, 1.0)'.format(out_val) else:", "normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res)) res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0)))", "return 'mposition' elif node.type == 'HAIR_INFO': return 'vec3(0.0)' # Tangent", "0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[0] != 0.0: # a", "return res_var def glsl_type(t): if t == 'RGB' or t", "return '{0}.z'.format(vec) elif node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2", "'3c', vec + '.z', curves[3].points)) elif node.type == 'COMBHSV': curshader.add_function(c_functions.str_hue_sat)", "curshader.add_function(c_functions.str_fresnel) 
ior = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))", "now.. a = rotation[2] # x * cos(theta) - y", "+ {0}.g * 0.59 + {0}.b * 0.11) / 3.0)", "hue = parse_value_input(node.inputs[0]) sat = parse_value_input(node.inputs[1]) val = parse_value_input(node.inputs[2]) fac", "12000): rgb[0] = 0.826270103 rgb[1] = 0.994478524 rgb[2] = 1.56626022", "elif node.type == 'TEX_MUSGRAVE': # Fall back to noise curshader.add_function(c_functions.str_tex_musgrave)", "basecol_only: return if inp.is_linked == False: return if normal_parsed: return", "== 'LOGARITHM': out_val = 'log({0})'.format(val1) elif op == 'SQRT': out_val", "* {2})'.format(rough1, rough2, fac_var, fac_inv_var) out_metallic = '({0} * {3}", "out_emission = parse_shader_input(node.inputs[0]) if parse_surface: frag.write('basecol = {0};'.format(out_basecol)) frag.write('roughness =", "aniso_rot = parse_vector_input(node.inputs[9]) # sheen = parse_vector_input(node.inputs[10]) # sheen_tint =", "parsed[tex_store] = True curshader.write_textures += 1 curshader.write('vec4 {0} = vec4(1.0,", "found(' + filepath + ')') return None if do_convert: unpack_path", "curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const for i in", "socket == node.outputs[4]: # Incoming return 'vVec' elif socket ==", "elif socket == node.outputs[1]: # Normal return 'n' elif socket", "'wposition' elif socket == node.outputs[1]: # Normal return 'n' if", "{0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co) res =", "* texCoordBlend.z;') else: if mat_texture_grad(): curshader.write('vec4 {0} = textureGrad({1}, {2}.xy,", "Make const for i in range(0, len(elems)): curshader.write('{0}[{1}] = vec3({2},", "scale) def parse_normal_map_color_input(inp, strength_input=None): global normal_parsed global frag if basecol_only:", "== node.outputs[9]: # Transparent Depth return '0.0' elif socket ==", "ys_var = name + '_ys' curshader.write('float 
{0}[{1}];'.format(ys_var, len(points))) # TODO:", "sample_bump_res con = _con vert = _vert frag = _frag", "(ext in ('tga', 'dds')) else 'jpg' tex['file'] = tex['file'].rsplit('.', 1)[0]", "s ## def make_texture(image_node, tex_name, matname=None): tex = {} tex['name']", "/unpacked filepath += '.raw' elif image.source == \"GENERATED\": unpack_path =", "safesrc(inp.node.name) + safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type) + ' ' + uname) return", "ext[2], ext[3])) curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2 *= ({1})", "vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1]) # if node.use_max: # out", "= 'param_' + node_name(node.name) curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name)) return nn else:", "return res_var else: if mat_batch() and inp.is_uniform: return to_uniform(inp) else:", "== 'Smart': # Mipmap anisotropic tex['min_filter'] = 'anisotropic' tex['mipmap_filter'] =", "= normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res)) res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2,", "- 1)) == 0) and num != 0 def is_ascii(s):", "node.outputs[6]: # Angular Velocity particle_info['angular_velocity'] = True return 'vec3(0.0)' elif", "Write facs array facs_var = node_name(node.name) + '_facs' curshader.write('float {0}[{1}];'.format(facs_var,", "return tex_store parsed[tex_store] = True mat_bind_texture(tex) con.add_elem('tex', 'short2norm') curshader.add_uniform('sampler2D {0}'.format(tex_name),", "# Normal return 'n' if curshader.shader_type == 'frag' else 'wnormal'", "new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, converted_path, file_format=fmt) arm.assets.add(converted_path) else:", "elif node.type == 'TEX_SKY': # Pass through return to_vec3([0.0, 0.0,", "+ safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type) + ' ' + uname) return uname", "1.0)'.format(out_val) else: return out_val elif node.type == 'RGBTOBW': col =", "op == 'SQRT': out_val = 'sqrt({0})'.format(val1) 
elif op == 'ABSOLUTE':", "co = ar2[0] post = ',' + ar2[1] else: co", "curshader.write('float {0} = {1};'.format(fac_var, fac)) col1 = parse_vector_input(node.inputs[1]) col2 =", "== 'VALTORGB': # ColorRamp fac = parse_value_input(node.inputs[0]) interp = node.color_ramp.interpolation", "{2})'.format(met1, met2, fac_var, fac_inv_var) out_occlusion = '({0} * {3} +", "parse_surface or parse_opacity: parsed = {} parents = [] normal_parsed", "return '1.0' elif socket == node.outputs[1]: # Is Shadow Ray", "{0}'.format(nn), link='{0}'.format(node.name)) return nn else: return to_vec1(node.outputs[0].default_value) elif node.type ==", "if is_parsed(store_var_name(node)): return '{0}.a'.format(store_var_name(node)) tex_name = safesrc(node.name) tex = make_texture(node,", "parents.append(node) out_group = parse_input(inp) parents.pop() return out_group def parse_group_input(node, socket):", "= os.path.join(unpack_path, tex['file']) # TODO: delete cache when file changes", "if parse_surface: write_normal(node.inputs[4]) out_basecol = parse_vector_input(node.inputs[0]) elif node.type == 'BSDF_TOON':", "parse_vector_input(node.inputs[2]) # subsurface_color = parse_vector_input(node.inputs[3]) out_metallic = parse_value_input(node.inputs[4]) out_specular =", "None: # Empty texture tex = {} tex['name'] = tex_name", "= '1.0' out_opacity = '1.0' out_emission = '0.0' if node.type", "* {2})'.format(col1, col2, fac_var) elif blend == 'OVERLAY': out_col =", "= node.color_ramp.interpolation elems = node.color_ramp.elements if len(elems) == 1: return", "3.81675160e-04, -7.30646033e-01], [6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01] ] if (t >=", "= name + '_ys' curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make", "len(s) == len(s.encode()) ## def get_rp_renderer(): return arm.utils.get_rp().rp_renderer def get_arm_export_tangents():", "out_basecol = 'vec3(0.8)' out_roughness = '0.0' out_metallic = '0.0' out_occlusion", "- {1})'.format(val1, 
val2) elif op == 'MULTIPLY': out_val = '({0}", "* {1} * {0} + {0} * (vec3(1.0) - (vec3(1.0)", "= parse_value_input(node.inputs[1]) v = parse_value_input(node.inputs[2]) return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v) elif", "out_roughness = parse_value_input(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))", "if node.rotation[0] != 0.0: # a = node.rotation[0] # out", "'ARCTANGENT': out_val = 'atan({0})'.format(val1) elif op == 'ARCTAN2': out_val =", "parse_displacement, basecol_only) def parse_output(node, _con, _vert, _frag, _geom, _tesc, _tese,", "== 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co", "socket == node.outputs[1]: # Age particle_info['age'] = True return 'p_age'", "== 'SINE': out_val = 'sin({0})'.format(val1) elif op == 'COSINE': out_val", "- sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z", "{0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3])) curshader.write('{0}_fh1", "node.type == 'COMBHSV': curshader.add_function(c_functions.str_hue_sat) h = parse_value_input(node.inputs[0]) s = parse_value_input(node.inputs[1])", "== 'COSINE': out_val = 'cos({0})'.format(val1) elif op == 'TANGENT': out_val", "= ar[0] + '(' if ',' in ar[1]: ar2 =", "interpolation == 'Closest': tex['min_filter'] = 'point' tex['mag_filter'] = 'point' #", "* {1} + 0.33), tex_noise({0} * {1} + 0.66))'.format(co, scale)", "# Mipmap anisotropic tex['min_filter'] = 'anisotropic' tex['mipmap_filter'] = 'linear' tex['generate_mipmaps']", "0.1) return res elif node.type == 'TEX_POINTDENSITY': return '0.0' elif", "= textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_4 =", "# Multiply basecol out_basecol = parse_vector_input(node.inputs[0]) out_emission = '1.0' emission_found", "= node.blend_type if blend == 'MIX': out_col = 'mix({0}, {1},", "= 
parse_vector_input(node.inputs[0]) gamma = parse_value_input(node.inputs[1]) return 'pow({0}, vec3({1}))'.format(out_col, gamma) elif", "'ADD': return '({0} + {1})'.format(vec1, vec2) elif op == 'SUBTRACT':", "node.image == None: # Empty texture tex = {} tex['name']", "* (vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0) - {0}))));'.format(col1,", "'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "res.split('(', 1) pre = ar[0] + '(' if ',' in", "# Vector Curves fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves", "_tese parse_surface = _parse_surface parse_opacity = _parse_opacity basecol_only = _basecol_only", "parse_shader(l.from_node, l.from_socket) else: out_basecol = 'vec3(0.8)' out_roughness = '0.0' out_metallic", "= 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic_f({0} * {1}", "0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res) return res elif node.type", "# Get filepath filepath = image.filepath if filepath == '':", "Age particle_info['age'] = True return 'p_age' if arm.utils.get_rp().arm_particles == 'On'", "== 1: return to_vec3(elems[0].color) # Write cols array cols_var =", "= 'fract({0})'.format(val1) elif op == 'MODULO': # out_val = 'float({0}", "'_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) col1 = parse_vector_input(node.inputs[1]) col2", "'vec3({0}.y * {1} - {0}.z * {2}, {0}.y * {2}", "1.0, 1.0);'.format(tex_store)) curshader.write_textures -= 1 return '{0}.rgb'.format(tex_store) elif node.type ==", "{0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name)) if sample_bump: sample_bump_res =", "# Write facs array facs_var = node_name(node.name) + '_facs' curshader.write('float", "node.type == 'BSDF_TRANSLUCENT': if parse_surface: write_normal(node.inputs[1]) if parse_opacity: out_opacity =", "'INTENSITY': res = 
'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale) else: # CELLS", "+ 0.5'.format(co) elif grad == 'QUADRATIC_SPHERE': f = '0.0' elif", "return parse_value_input(node.inputs[7]) else: return None else: return parse_group(node, socket) elif", "distortion = parse_value_input(node.inputs[3]) res = 'tex_noise({0} * {1})'.format(co, scale) if", "= arm.utils.get_rp() if rpdat.arm_rp_displacement == 'Tessellation' and tese != None:", "1.0 - texCoord.y, 0.0)' elif socket == node.outputs[3]: # Object", "= parse_vector_input(node.inputs[2]) blend = node.blend_type if blend == 'MIX': out_col", "socket == node.outputs[6]: # Is Transmission Ray return '0.0' elif", "to_vec3(elems[0].color) # Write cols array cols_var = node_name(node.name) + '_cols'", "Image Texture directly') parse_normal_map_color_input(node.inputs[5]) # Emission if node.inputs[6].is_linked or node.inputs[6].default_value", "f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co) elif grad", "'GROUP_INPUT': normal_res = parse_vector_input(inp) if normal_res != None: curshader.write('n =", "if strength_input != None: strength = parse_value_input(strength_input) if strength !=", "{0}.z * {0}.z), 0.0)'.format(co) res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f) if", "#type = node.vector_type #conv_from = node.convert_from #conv_to = node.convert_to #", "filepath += '.raw' elif image.source == \"GENERATED\": unpack_path = os.path.join(arm.utils.get_fp_build(),", "'/' + image.name + ' - file not found(' +", "False particle_info['angular_velocity'] = False sample_bump = False sample_bump_res = ''", "dotnv = 'dotNV' return 'fresnel({0}, {1})'.format(ior, dotnv) elif node.type ==", "+ {1} * {2})'.format(spec1, spec2, fac_var, fac_inv_var) out_emission = '({0}", "op = node.operation if op == 'DOT_PRODUCT': return 'dot({0}, {1})'.format(vec1,", "tex['min_filter'] = 'anisotropic' tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True elif", "g = parse_value_input(node.inputs[1]) b = parse_value_input(node.inputs[2]) return 
'vec3({0}, {1}, {2})'.format(r,", "os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) filepath =", "parse_surface: write_normal(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif", "fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves = node.mapping.curves name", "fac_var) # Revert to mix # out_col = '({0} +", "'SUBTRACT': return '({0} - {1})'.format(vec1, vec2) elif op == 'AVERAGE':", "{2})'.format(opac1, opac2, fac_var, fac_inv_var) elif node.type == 'ADD_SHADER': bc1, rough1,", "# Is Singular Ray return '0.0' elif socket == node.outputs[5]:", "parse_vector_input(node.inputs[14]) # transmission = parse_vector_input(node.inputs[15]) # transmission_roughness = parse_vector_input(node.inputs[16]) if", "'tex_noise({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return", "is not None: filepath = './' + image.name has_ext =", "location[0] != 0.0 or location[1] != 0.0 or location[2] !=", "l.from_node.type == 'REROUTE': return parse_value_input(l.from_node.inputs[0]) res_var = write_result(l) st =", "or agreed to in writing, software # distributed under the", "parse_value_input(node.inputs[1]) out_specular = '0.0' elif node.type == 'BSDF_GLOSSY': if parse_surface:", "== 'OVERLAY': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) #", "'FRACT': out_val = 'fract({0})'.format(val1) elif op == 'MODULO': # out_val", "elif node.type == 'SUBSURFACE_SCATTERING': if parse_surface: write_normal(node.inputs[4]) out_basecol = parse_vector_input(node.inputs[0])", "1 curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) curshader.write_textures -=", "parse_value_input(node.inputs[4]) out_specular = parse_value_input(node.inputs[5]) # specular_tint = parse_vector_input(node.inputs[6]) out_roughness =", "not os.path.isfile(unpack_filepath) or 
os.path.getsize(unpack_filepath) != image.packed_file.size: with open(unpack_filepath, 'wb') as", "return res elif node.type == 'BRIGHTCONTRAST': out_col = parse_vector_input(node.inputs[0]) bright", "if parse_surface: write_normal(node.inputs[3]) out_roughness = parse_value_input(node.inputs[1]) if parse_opacity: out_opacity =", "'TEX_POINTDENSITY': return '0.0' elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() +", "'vec3(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res) return res elif", "Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_VORONOI':", "{1};'.format(fac_var, fac)) index = '0' for i in range(1, len(points)):", "= parse_vector_input(node.inputs[1]) op = node.operation if op == 'DOT_PRODUCT': return", "distortion = parse_value_input(node.inputs[3]) # Slow.. res = 'vec3(tex_noise({0} * {1}),", "{0}_fh2, 2.0))) * n)'.format(sample_bump_res) sample_bump_res = '' else: res =", "+ 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') curshader.add_function(c_functions.str_tex_noise) if node.inputs[0].is_linked: co", "({0}.y) * {2}, {0}.x * {2} + ({0}.y) * {1},", "= '(vec3((1.0 - {2}) * {0} + {2} * {0}", "if parse_surface: out_basecol = '({0} + {1})'.format(bc1, bc2) out_roughness =", "'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2 = parse_vector_input(node.inputs[1]) op = node.operation", "{1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[1] != 0.0: #", "as mat_state import arm.material.cycles_functions as c_functions import shutil emission_found =", "seems to load full images on size request, cache size", "st == 'VECTOR': res = parse_vector(l.from_node, l.from_socket) if res ==", "curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2", "'noise256.png') assets_add_embedded_data('noise256.png') 
curshader.add_uniform('sampler2D snoise256', link='$noise256.png') curshader.add_function(c_functions.str_tex_noise) if node.inputs[0].is_linked: co =", "return 'mposition' elif socket == node.outputs[4]: # Camera return 'vec3(0.0)'", "- {1}, {2})'.format(col1, col2, fac_var) elif blend == 'SCREEN': out_col", "st == 'VECTOR': return '{0}.x'.format(res_var) else: # VALUE return res_var", "'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y +", "+ s[1].lower())) else: arm.assets.add(arm.utils.asset_path(filepath)) # if image_format != 'RGBA32': #", "out_val = 'ceil({0})'.format(val1) elif op == 'FRACT': out_val = 'fract({0})'.format(val1)", "* 0.5)'.format(co, scale) if sample_bump: write_bump(node, res) return res elif", "Fresnel curshader.add_function(c_functions.str_fresnel) return 'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend, dotnv)", "Tangent return 'wtangent' elif socket == node.outputs[3]: # True Normal", "* 0.5))'.format(co, scale) if sample_bump: write_bump(node, res) return res elif", "different or file does not exist yet if image.packed_file is", "{0} + {2} * {0} / {1}))'.format(col1, col2, fac_var) elif", "{0}.y + {0}.z * {0}.z), 0.0)'.format(co) res = 'vec3(clamp({0}, 0.0,", "vec2) elif op == 'SUBTRACT': return '({0} - {1})'.format(vec1, vec2)", "path preferred? 
nor = parse_vector_input(node.inputs[0]) return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor) elif", "== 'COMBHSV': curshader.add_function(c_functions.str_hue_sat) h = parse_value_input(node.inputs[0]) s = parse_value_input(node.inputs[1]) v", "= parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) col3 = parse_vector_input(node.inputs[3]) scale =", "'EMISSION': if parse_surface: # Multiply basecol out_basecol = parse_vector_input(node.inputs[0]) out_emission", "'ADD': out_val = '({0} + {1})'.format(val1, val2) elif op ==", "* {1} * 0.5))'.format(co, scale) if sample_bump: write_bump(node, res) return", "{1} = {2};'.format(prefix, fac_var, fac)) curshader.write('{0}float {1} = 1.0 -", "write_bump(node, res) return res elif node.type == 'TEX_ENVIRONMENT': # Pass", "node.type == 'TANGENT': return 'wtangent' elif node.type == 'TEX_COORD': #obj", "node.outputs[3]: # Object return 'mposition' elif socket == node.outputs[4]: #", "def node_by_type(nodes, ntype): for n in nodes: if n.type ==", "z = parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(x, y, z) elif", "= '1.0' out_metallic = '1.0' elif node.type == 'VOLUME_ABSORPTION': pass", "scale) if sample_bump: write_bump(node, res, 0.1) return res elif node.type", "vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2 = vec2(0.0);') #", "= parents.pop() # Leaving group inp = parent.inputs[index] res =", "return '{0}.rgb'.format(store_var_name(node)) tex_name = node_name(node.name) tex = make_texture(node, tex_name) tex_link", "{0};'.format(normal_res)) def is_parsed(s): global parsed return s in parsed def", "Multiply basecol out_basecol = parse_vector_input(node.inputs[0]) out_emission = '1.0' emission_found =", "global parse_surface global parse_opacity global basecol_only global emission_found global particle_info", "{tex_store} = vec4(0.0, 0.0, 0.0, 0.0);') curshader.write(f'if (texCoordBlend.x > 0)", "particle_info['age'] = False 
particle_info['lifetime'] = False particle_info['location'] = False particle_info['size']", "out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0]) if parse_surface: frag.write('basecol = {0};'.format(out_basecol))", "out_col = '(vec3(1.0) - (vec3(1.0 - {2}) + {2} *", "'ARCTAN2': out_val = 'atan({0}, {1})'.format(val1, val2) if node.use_clamp: return 'clamp({0},", "= '({0} * 0.5 + {1} * 0.5)'.format(opac1, opac2) elif", "in nodes: if n.type == ntype: return n def socket_index(node,", "/ ({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var)", "out_occlusion = '({0} * {3} + {1} * {2})'.format(occ1, occ2,", "con.add_elem('tang', 'short4norm') frag.write_normal -= 1 def parse_value_input(inp): if inp.is_linked: l", "== 'EMISSION': if parse_surface: # Multiply basecol out_basecol = parse_vector_input(node.inputs[0])", "450.0) / 150.0)'.format(wl) # Vector elif node.type == 'CAMERA': #", "elif node.type == 'AMBIENT_OCCLUSION': if parse_surface: # Single channel out_occlusion", "= True return 'p_location' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'", "node.from_instance if socket == node.outputs[0]: # Generated - bounds return", "{} tex['name'] = tex_name tex['file'] = '' return '{0}.rgb'.format(texture_store(node, tex,", "return None curshader.write('float {0} = {1};'.format(res_var, res)) # Normal map", "missing texture parsed[tex_store] = True curshader.write_textures += 1 curshader.write('vec4 {0}", "tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False): output_node = node_by_type(nodes, 'OUTPUT_MATERIAL') if", "(pos - start) * (1.0 / (finish - start)) return", "con.add_elem('tex', 'short2norm') mat = mat_get_material() mat_users = mat_get_material_users() if mat_users", "3.0) * 2.5)'.format(col) elif node.type == 'SEPHSV': return '0.0' elif", "socket == node.outputs[0]: # Color con.add_elem('col', 'short4norm') # Vcols only", "= image_node.interpolation rpdat = 
arm.utils.get_rp() texfilter = rpdat.arm_texture_filter if texfilter", "'sin({0})'.format(val1) elif op == 'COSINE': out_val = 'cos({0})'.format(val1) elif op", "socket == node.outputs[5]: # Is Reflection Ray return '0.0' elif", "parse_vector_input(node.inputs[9]) # sheen = parse_vector_input(node.inputs[10]) # sheen_tint = parse_vector_input(node.inputs[11]) #", "{2})'.format(occ1, occ2, fac_var, fac_inv_var) out_specular = '({0} * {3} +", "z) elif node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2 =", "* {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) if", "None and mat in mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data,", "out_specular = '({0} * {3} + {1} * {2})'.format(spec1, spec2,", "missing texture curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) return", "parse_normal_map_color_input(node.inputs[1], node.inputs[0]) return None elif node.type == 'VECT_TRANSFORM': #type =", "'png' if (ext in ('tga', 'dds')) else 'jpg' tex['file'] =", "# sheen = parse_vector_input(node.inputs[10]) # sheen_tint = parse_vector_input(node.inputs[11]) # clearcoat", "vec3(0.0), vec3(1.0))'.format(out_col) else: return out_col elif node.type == 'BLACKBODY': t", "= '0' for i in range(1, len(points)): index += '", "return '{0}.x'.format(vec) elif socket == node.outputs[1]: return '{0}.y'.format(vec) elif socket", "0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store,", "'GROUP': return parse_group(node, socket) elif node.type == 'GROUP_INPUT': return parse_group_input(node,", "* {1} * 4.0)'.format(co, scale) if sample_bump: write_bump(node, res, 0.1)", "* {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[1] != 0.0:", "tese else: curshader = vert out_disp = parse_displacement_input(node.inputs[2]) curshader.write('vec3 disp", "matname = mat_state.material.name if image is None: return None #", 
"node.inputs[17].default_value[0] != 0.0: out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found = True #", "mat_batch() and inp.is_uniform: return to_uniform(inp) else: return to_vec1(inp.default_value) def parse_value(node,", "({0}), {1})'.format(out_col, fac) elif node.type == 'MIX_RGB': fac = parse_value_input(node.inputs[0])", "sin(theta) + y * cos(theta) out = 'vec3({0}.x * {1}", "node.uv_map == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0 - texCoord1.y,", "{2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_2 = textureOffset({1}, {2}.xy,", "return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6})) * {3})'.format(\\", "when file changes if not os.path.isfile(converted_path): fmt = 'PNG' if", "Normal return 'n' if curshader.shader_type == 'frag' else 'wnormal' elif", "load full images on size request, cache size instead powimage", "out_col = 'min({0}, {1} * {2})'.format(col1, col2, fac_var) elif blend", "'Closest': tex['min_filter'] = 'point' tex['mag_filter'] = 'point' # else defaults", "9.08181524e-01], [-1.22075471e+03, 2.56245413e-05, 1.20753416e+00], [-1.42546105e+03, -4.01730887e-05, 1.44002695e+00], [-1.18134453e+03, -2.18913373e-05, 1.30656109e+00],", "fac_var, fac_inv_var) out_occlusion = '({0} * {3} + {1} *", "out_roughness = '({0} * 0.5 + {1} * 0.5)'.format(rough1, rough2)", "uv_name)) else: curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name)) if", "'0.0' out_metallic = '0.0' out_occlusion = '1.0' out_specular = '1.0'", "parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res =", "== 'CAMERA': # View Vector in camera space return 'vVecCam'", "- {0}) * {1} * {0} + {0} * (vec3(1.0)", "or t == 'RGBA' or t == 'VECTOR': return 'vec3'", "= os.path.join(unpack_path, tex['file']) if do_convert: if not os.path.isfile(unpack_filepath): fmt =", "import bpy import os import 
arm.assets import arm.utils import arm.make_state", "or st == 'RGBA' or st == 'VECTOR': return '{0}.x'.format(res_var)", "pow({0}.rgb, vec3(2.2));'.format(tex_store)) return tex_store def write_bump(node, res, scl=0.001): global sample_bump", "node.convert_from #conv_to = node.convert_to # Pass throuh return parse_vector_input(node.inputs[0]) elif", "== 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'VERTEX_COLOR': con.add_elem('col',", "distortion = parse_value_input(node.inputs[3]) res = 'tex_musgrave_f({0} * {1} * 0.5)'.format(co,", "{0}.y + {0}.z * {0}.z), 0.0)'.format(co) res = '(clamp({0}, 0.0,", "'1.0' elif node.type == 'MATH': val1 = parse_value_input(node.inputs[0]) val2 =", "({1} < 0.5) ? 2.0 * {1} : 0.5 /", "to_vec3(node.outputs[0].default_value) elif socket == node.outputs[1]: # TODO: is parse_value path", "_parse_displacement, _basecol_only): global parsed # Compute nodes only once global", "-4.34581697e-04, 1.64843306e+00], [4.10671449e+03, -8.61949938e-05, 6.41423749e-01], [4.66849800e+03, 2.85655028e-05, 1.29075375e-01], [4.60124770e+03, 2.89727618e-05,", "'CURVE_VEC': # Vector Curves fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1])", "## def vector_curve(name, fac, points): # Write Ys array ys_var", "'0.0' ## def vector_curve(name, fac, points): # Write Ys array", "((b[0] * t + b[1]) * t + b[2]) *", "= 'point' # else defaults to linear if image_node.extension !=", "'({0} * 0.5 + {1} * 0.5)'.format(opac1, opac2) elif node.type", "= parse_vector_input(node.inputs[0]) uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name) else: uv_name", "node.type == 'NORMAL_MAP': if curshader == tese: return parse_vector_input(node.inputs[1]) else:", "sample_bump: write_bump(node, res) return res elif node.type == 'TEX_GRADIENT': if", "name + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) index =", "col2, fac_var) elif blend == 'DIFFERENCE': out_col = 'mix({0}, abs({0}", "nor) elif node.type == 
'VALTORGB': # ColorRamp return '1.0' elif", "0.0)' elif socket == node.outputs[3]: # Object return 'mposition' elif", "l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_value_input(l.from_node.inputs[0]) res_var", "== 'ADD': out_col = 'mix({0}, {0} + {1}, {2})'.format(col1, col2,", "col2, fac_var) # Revert to mix elif blend == 'BURN':", "node.outputs[10]: # Transmission Depth return '0.0' elif node.type == 'OBJECT_INFO':", "parse_vector_input(node.inputs[3]) if sample_bump_res != '': if node.invert: ext = ['1',", "t_inv = 1.0 / t rgb[0] = r[0] * t_inv", "-vVec, texCoord);') frag.write('n = TBN * normalize(texn);') else: frag.write('vec3 n", "'vec3(0.0)' elif socket == node.outputs[6]: # Angular Velocity particle_info['angular_velocity'] =", "None: if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size: with open(unpack_filepath,", "- {2}) + {2} * (vec3(1.0) - {1})) * (vec3(1.0)", "node.type == 'BRIGHTCONTRAST': out_col = parse_vector_input(node.inputs[0]) bright = parse_value_input(node.inputs[1]) contr", "textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name)) else: curshader.write('vec4 {0} =", "+ {0}.z * {0}.z), 0.0)'.format(co) res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f)", "node.type == 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else:", "if tex != None: curshader.write_textures += 1 to_linear = node.image", "return 'vec3(0.0)' # 'wvpposition' elif socket == node.outputs[6]: # Reflection", "5 elif(t >= 3315.0): i = 4 elif(t >= 1902.0):", "Emission if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0: out_emission = parse_value_input(node.inputs[6])", "* {1} + 0.66))'.format(co, scale) if sample_bump: write_bump(node, res, 0.1)", "cos(theta) - y * sin(theta) # x * sin(theta) +", "arm.utils.get_rp() texfilter = rpdat.arm_texture_filter if texfilter == 'Anisotropic': interpolation 
=", "= l.from_socket.type if st == 'RGB' or st == 'RGBA'", "Pass throuh return parse_vector_input(node.inputs[0]) elif node.type == 'COMBXYZ': x =", "safesrc(node.name) tex = make_texture(node, tex_name) tex_link = node.name if node.arm_material_param", "parse_vector(l.from_node, l.from_socket) if res == None: return None curshader.write('vec3 {0}", "= os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) filepath", "'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic({0} * {1} *", "if l.from_node.type == 'REROUTE': return parse_vector_input(l.from_node.inputs[0]) res_var = write_result(l) st", "= normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res)) curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res))", "= parse_vector_input(node.inputs[1]) op = node.operation if op == 'ADD': return", "0.0);') curshader.write(f'if (texCoordBlend.x > 0) {tex_store} += texture({tex_name}, {uv_name}.xy) *", "elif blend == 'SCREEN': out_col = '(vec3(1.0) - (vec3(1.0 -", "time', link='_time') return 'time' else: return '0.0' elif node.type ==", "bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1]) if", "socket == node.outputs[2]: # UV con.add_elem('tex', 'short2norm') return 'vec3(texCoord.x, 1.0", "node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0}, {1}, {2})", "{1} ? 
1 : 0)'.format(fac_var, points[i].location[0]) # Write index index_var", "1.44002695e+00], [-1.18134453e+03, -2.18913373e-05, 1.30656109e+00], [-5.00279505e+02, -4.59745390e-06, 1.09090465e+00] ] blackbody_table_b =", "# Backfacing return '(1.0 - float(gl_FrontFacing))' elif socket == node.outputs[7]:", "if node.coloring == 'INTENSITY': res = 'tex_voronoi({0} * {1}).a'.format(co, scale)", "curshader.add_uniform('sampler2D snoise256', link='$noise256.png') if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co", "rotation[2] # x * cos(theta) - y * sin(theta) #", "'_bump' # Testing.. get function parts.. ar = res.split('(', 1)", "if tex != None: curshader.write_textures += 1 res = '{0}.a'.format(texture_store(node,", "== 'TANGENT': out_val = 'tan({0})'.format(val1) elif op == 'ARCSINE': out_val", "= ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) if strength_input != None:", "out_emission = parse_value_input(node.inputs[6]) emission_found = True if parse_opacity: out_opacity =", "= rpdat.arm_texture_filter if texfilter == 'Anisotropic': interpolation = 'Smart' elif", "opac1, emi1 = parse_shader_input(node.inputs[1]) bc2, rough2, met2, occ2, spec2, opac2,", "# RGB Curves fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves", "0.0], [-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02], [-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01], [6.72595954e-13,", "sample_bump: write_bump(node, res) return res elif node.type == 'TEX_IMAGE': #", "Unless required by applicable law or agreed to in writing,", "= parse_value_input(node.inputs[0]) midlevel = parse_value_input(node.inputs[1]) scale = parse_value_input(node.inputs[2]) nor =", "res elif node.type == 'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co =", "curshader.write_textures += 1 to_linear = node.image != None and node.image.colorspace_settings.name", 
"curshader.write('float {0} = {1};'.format(fac_var, fac)) index = '0' for i", "parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False): output_node = node_by_type(nodes, 'OUTPUT_MATERIAL') if output_node", "{2}, {0}.x * {2} + ({0}.y) * {1}, 0.0)'.format(out, math.cos(a),", "tex['u_addressing'] = 'clamp' tex['v_addressing'] = 'clamp' if image.source == 'MOVIE':", "0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [-2.02524603e-11, 1.79435860e-07, -2.60561875e-04,", "= node.convert_to # Pass throuh return parse_vector_input(node.inputs[0]) elif node.type ==", "return 'vVec' elif socket == node.outputs[5]: # Parametric return 'mposition'", "= '{0}.x'.format(co) elif grad == 'QUADRATIC': f = '0.0' elif", "'NORMAL': if socket == node.outputs[0]: return to_vec3(node.outputs[0].default_value) elif socket ==", "= parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) blend = node.blend_type if blend", "node.attribute_name == 'time': curshader.add_uniform('float time', link='_time') return 'time' else: return", "if mat_texture_grad(): curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name,", "filepath = os.path.join(unpack_path, image.name + \".jpg\") arm.utils.convert_image(image, filepath, \"JPEG\") else:", "Get index fac_var = name + '_fac' curshader.write('float {0} =", "t + b[1]) * t + b[2]) * t +", "(2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var) if node.use_clamp: return", "name = node_name(node.name) # mapping.curves[0].points[0].handle_type # bezier curve return '(vec3({0},", "'VALUE': # Unlinked reroute return to_vec3([0.0, 0.0, 0.0]) else: if", "{3})'.format(co, col1, col2, col3, scale) if sample_bump: write_bump(node, res) return", "curshader.shader_type == 'frag' else 'wnormal' elif socket == node.outputs[4]: #", "'_x') return s ## def make_texture(image_node, tex_name, matname=None): tex =", "else: return to_vec1(node.outputs[0].default_value) elif node.type == 'WIREFRAME': 
#node.use_pixel_size # size", "out_val = '({0} * {1})'.format(val1, val2) elif op == 'DIVIDE':", "0.0)'.format(out, math.cos(a), math.sin(a)) if location[0] != 0.0 or location[1] !=", "node.rotation[1] != 0.0: # a = node.rotation[1] # out =", "{2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_4 = textureOffset({1}, {2}.xy,", "Linear # Write facs array facs_var = node_name(node.name) + '_facs'", "{0}_4 = {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post,", "{uv_name}2.xy) * texCoordBlend.z;') else: if mat_texture_grad(): curshader.write('vec4 {0} = textureGrad({1},", "(t >= 6365.0): i = 5 elif(t >= 3315.0): i", "= 'float({0} < {1})'.format(val1, val2) elif op == 'GREATER_THAN': out_val", "elif inp.type == 'RGB': return parse_vector_input(inp) elif inp.type == 'RGBA':", "node.rotation[0] # out = 'vec3({0}.y * {1} - {0}.z *", "'{0}.g'.format(col) elif socket == node.outputs[2]: return '{0}.b'.format(col) elif node.type ==", "= {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl))", "== 'QUADRATIC_SPHERE': f = '0.0' elif grad == 'SPHERICAL': f", "== node.outputs[10]: # Transmission Depth return '0.0' elif node.type ==", "= 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) elif blend == 'ADD':", "# Location particle_info['location'] = True return 'p_location' if arm.utils.get_rp().arm_particles ==", "rgb[2] = 0.0 else: if (t >= 6365.0): i =", "elif node.type == 'BSDF_GLASS': if parse_surface: write_normal(node.inputs[3]) out_roughness = parse_value_input(node.inputs[1])", "parsed = {} parents = [] normal_parsed = False curshader", "to_vec1(v): return str(v) def to_vec3(v): return 'vec3({0}, {1}, {2})'.format(v[0], v[1],", "'({0} * {3} + {1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var)", "elif node.type == 'CURVE_RGB': # RGB Curves fac = parse_value_input(node.inputs[0])", "post = ',' + ar2[1] else: co = ar[1][:-1] post", "'0.0' out_occlusion = '1.0' out_specular = '1.0' 
out_opacity = '1.0'", "Roughness out_roughness = parse_value_input(node.inputs[3]) # Metallic out_metallic = parse_value_input(node.inputs[4]) #", "= True return 'vec3(0.0)' elif node.type == 'TANGENT': return 'wtangent'", "elif op == 'ARCSINE': out_val = 'asin({0})'.format(val1) elif op ==", "= 'max({0}, {1} * {2})'.format(col1, col2, fac_var) elif blend ==", "normal_res != None: curshader.write('n = {0};'.format(normal_res)) def is_parsed(s): global parsed", "res = 'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co, col1, col2,", "tex['name'] = tex_name tex['file'] = '' return '{0}.rgb'.format(texture_store(node, tex, tex_name,", "= parse_value_input(node.inputs[3]) # Metallic out_metallic = parse_value_input(node.inputs[4]) # Normal if", "node.outputs[0]: # Position return 'wposition' elif socket == node.outputs[1]: #", "elif node.type == 'BLACKBODY': t = float(parse_value_input(node.inputs[0])) rgb = [0,0,0]", "socket == node.outputs[4]: # Size particle_info['size'] = True return '1.0'", "'0.0' if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): if parse_surface:", "'SHADER': return parse_shader_input(inp) elif inp.type == 'RGB': return parse_vector_input(inp) elif", "for now return '1.0' elif node.type == 'NORMAL': nor =", "linear, quadratic # Shaders default to quadratic for now return", "vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2]) if rotation[2] != 0.0:", "socket): for i in range(0, len(node.outputs)): if node.outputs[i] == socket:", "node.inputs['Scale'].default_value rotation = node.inputs['Rotation'].default_value location = node.inputs['Location'].default_value if node.inputs['Location'].enabled else", "{0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_3", "= '0.0' elif grad == 'SPHERICAL': f = 'max(1.0 -", "in mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): lays =", "= 'float({0} % {1})'.format(val1, val2) out_val = 
'mod({0}, {1})'.format(val1, val2)", "extension required for image name') return None ext = s[1].lower()", "'0.0' elif socket == node.outputs[8]: # Ray Depth return '0.0'", "if mat_batch() and inp.is_uniform: return to_uniform(inp) else: return to_vec1(inp.default_value) def", "elif op == 'DIVIDE': out_val = '({0} / {1})'.format(val1, val2)", "#conv_from = node.convert_from #conv_to = node.convert_to # Pass throuh return", "You may obtain a copy of the License at #", "len(elems))) # TODO: Make const for i in range(0, len(elems)):", "out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_shader(node, socket):", "{1}) * (vec3(1.0) - {0}))));'.format(col1, col2, fac) elif blend ==", "0.0], [0.0, 0.0, 0.0, 0.0], [-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02], [-2.22463426e-13,", "tex_name tex['file'] = '' return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link))", "'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)): return '{0}.rgb'.format(store_var_name(node)) tex_name =", "= '1.0' out_emission = '0.0' return out_basecol, out_roughness, out_metallic, out_occlusion,", "parse_vector_input(node.inputs[0]) gamma = parse_value_input(node.inputs[1]) return 'pow({0}, vec3({1}))'.format(out_col, gamma) elif node.type", "converted_path, file_format=fmt) arm.assets.add(converted_path) else: # Link image path to assets", "= parse_value_input(node.inputs[3]) col = parse_vector_input(node.inputs[4]) return 'hue_sat({0}, vec4({1}-0.5, {2}, {3},", "{1} * 0.5)'.format(occ1, occ2) out_specular = '({0} * 0.5 +", "'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co =", "out_specular = '1.0' out_opacity = '1.0' out_emission = '0.0' if", "{0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength)) curshader.write('vec3 {0}_a = normalize(vec3(2.0,", "else: frag.write('vec3 n = 
({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) if", "+ 0.33), tex_noise({0} * {1} + 0.66))'.format(co, scale) if sample_bump:", "strength != '1.0': frag.write('n.xy *= {0};'.format(strength)) frag.write('n = normalize(TBN *", "# distance = parse_value_input(node.inputs[1]) sample_bump = True height = parse_value_input(node.inputs[2])", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "def socket_index(node, socket): for i in range(0, len(node.outputs)): if node.outputs[i]", "== node.outputs[4]: # Size particle_info['size'] = True return '1.0' elif", "Mipmap linear tex['mipmap_filter'] = 'linear' tex['generate_mipmaps'] = True elif interpolation", "= {2};'.format(facs_var, i, points[i].location[0])) # Map vector return 'mix({0}[{1}], {0}[{1}", "return 'wtangent' elif socket == node.outputs[3]: # True Normal return", "socket == node.outputs[0]: # Index particle_info['index'] = True return 'p_index'", "is different or file does not exist yet if image.packed_file", "to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co", "node.attribute_name == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0 - texCoord1.y,", "0.0, 1.0, 1.0);'.format(tex_store)) return '{0}.a'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic)", "'OBJECT_INFO': return 'wposition' elif node.type == 'PARTICLE_INFO': if socket ==", "node.use_max: # out = 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1])", "connect Image Texture directly') parse_normal_map_color_input(node.inputs[5]) # Emission if node.inputs[6].is_linked or", "math import bpy import os import arm.assets import arm.utils import", "{3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2])) # Get index fac_var", "parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_TRANSPARENT':", 
"elif socket == node.outputs[3]: # True Normal return 'n' if", "= '0' for i in range(1, len(elems)): index += '", "> 1 and node.attribute_name == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x,", "'sRGB' res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link)) curshader.write_textures -=", "res) return res elif node.type == 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if node.inputs[0].is_linked:", "p.name + '_' + s if curshader.write_textures > 0: s", "parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res", "- texCoord.y, 0.0)' elif node.type == 'BUMP': # Interpolation strength", "!= 1.0: out = '({0} * vec3({1}, {2}, {3}))'.format(out, scale[0],", "* {0}.z), 0.0)'.format(co) res = '(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump:", "'LINEAR': f = '{0}.x'.format(co) elif grad == 'QUADRATIC': f =", "parse_vector_input(node.inputs[10]) # sheen_tint = parse_vector_input(node.inputs[11]) # clearcoat = parse_vector_input(node.inputs[12]) #", "fac_var) # Revert to mix elif blend == 'HUE': out_col", "occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0]) bc2, rough2, met2, occ2,", "= '({0} * 0.5 + {1} * 0.5)'.format(rough1, rough2) out_metallic", "{1})'.format(val1, val2) if node.use_clamp: return 'clamp({0}, 0.0, 1.0)'.format(out_val) else: return", "# subsurface_color = parse_vector_input(node.inputs[3]) out_metallic = parse_value_input(node.inputs[4]) out_specular = parse_value_input(node.inputs[5])", "* {1})'.format(co, scale) if sample_bump: write_bump(node, res) return res elif", "== 'On' else 'vec3(0.0)' elif socket == node.outputs[5]: # Velocity", "'(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_VELVET': if parse_surface: write_normal(node.inputs[2])", "curshader.write('float {0}_2 = {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co,", "return 
'fresnel({0}, {1})'.format(ior, dotnv) elif node.type == 'NEW_GEOMETRY': if socket", "## def make_texture(image_node, tex_name, matname=None): tex = {} tex['name'] =", "Revert to mix elif blend == 'HUE': out_col = 'mix({0},", "node.type == 'MATH': val1 = parse_value_input(node.inputs[0]) val2 = parse_value_input(node.inputs[1]) op", "abs({0} - {1}), {2})'.format(col1, col2, fac_var) elif blend == 'DARKEN':", "a = node.rotation[0] # out = 'vec3({0}.y * {1} -", "op == 'MULTIPLY': out_val = '({0} * {1})'.format(val1, val2) elif", "parse_vector_input(node.inputs[1]) op = node.operation if op == 'ADD': return '({0}", "parse_opacity: out_opacity = '({0} * 0.5 + {1} * 0.5)'.format(opac1,", "= parse_vector_input(node.inputs[0]) out_emission = '1.0' emission_found = True emission_strength =", "os.makedirs(unpack_path) filepath = os.path.join(unpack_path, image.name + \".jpg\") arm.utils.convert_image(image, filepath, \"JPEG\")", "Revert to mix # out_col = '({0} + {2} *", "tex_name, uv_name)) else: curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name))", "{0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res) sample_bump_res = '' else:", "if node.use_clamp: return 'clamp({0}, 0.0, 1.0)'.format(out_val) else: return out_val elif", "g[2] rgb[2] = ((b[0] * t + b[1]) * t", "image.name + ' - file extension required for image name')", "PI2 + 0.5'.format(co) elif grad == 'QUADRATIC_SPHERE': f = '0.0'", "= True height = parse_value_input(node.inputs[2]) sample_bump = False nor =", "opac2, fac_var, fac_inv_var) elif node.type == 'ADD_SHADER': bc1, rough1, met1,", "to_vec3(v): return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2]) def node_by_type(nodes, ntype):", "os.path.isfile(converted_path): fmt = 'PNG' if new_ext == 'png' else 'JPEG'", "{1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var) elif node.type == 'ADD_SHADER':", "= parse_vector_input(node.inputs[16]) if node.inputs[17].is_linked or 
node.inputs[17].default_value[0] != 0.0: out_emission =", "== 'time': curshader.add_uniform('float time', link='_time') return 'time' else: return '0.0'", "'1.0' out_emission = '0.0' return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular,", "== 'On' else '0.0' elif socket == node.outputs[1]: # Age", "'jpg' tex['file'] = tex['file'].rsplit('.', 1)[0] + '.' + new_ext if", "= True return '1.0' elif node.type == 'VALUE': if node.arm_material_param:", "node.inputs[6].is_linked or node.inputs[6].default_value != 0.0: out_emission = parse_value_input(node.inputs[6]) emission_found =", "!= 'REPEAT': # Extend or clip tex['u_addressing'] = 'clamp' tex['v_addressing']", "not None or not is_ascii(texfile): # Extract packed data /", "out_basecol = parse_vector_input(node.inputs[0]) # Occlusion out_occlusion = parse_value_input(node.inputs[2]) # Roughness", "vert, frag, geom, tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only) def", "l.from_socket) if res == None: return None curshader.write('vec3 {0} =", "image_format != 'RGBA32': # tex['format'] = image_format interpolation = image_node.interpolation", "glsl_type(t): if t == 'RGB' or t == 'RGBA' or", "in ('tga', 'dds')) else 'jpg' tex['file'] = tex['file'].rsplit('.', 1)[0] +", "elif socket == node.outputs[2]: # Lifetime particle_info['lifetime'] = True return", "parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'vec3(tex_musgrave_f({0} * {1}", "node.type == 'VOLUME_SCATTER': pass return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular,", "0.0 rgb[2] = 0.0 else: if (t >= 6365.0): i", "- {0}.z * {2}, {0}.x * {2} + {0}.z *", "node.type == 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else:", "node.inputs[2].is_linked: parsed = {} parents = [] normal_parsed = False", "parse_displacement_input(node.inputs[2]) curshader.write('vec3 disp = 
{0};'.format(out_disp)) def parse_group(node, socket): # Entering", "{0}.y)'.format(uv_name) else: uv_name = 'texCoord' triplanar = node.projection == 'BOX'", "'_ys' curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make const for i", "= {2};'.format(ys_var, i, points[i].location[1])) # Get index fac_var = name", "blend == 'LINEAR_LIGHT': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)", "= 0.826270103 rgb[1] = 0.994478524 rgb[2] = 1.56626022 elif (t", "def glsl_type(t): if t == 'RGB' or t == 'RGBA'", "not None: filepath = './' + image.name has_ext = filepath.endswith(('.jpg',", "distributed under the License is distributed on an \"AS IS\"", "= 'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co, col1, col2, col3,", "0.0]) elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/' +", "else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic({0}", "= True return 'p_index' if arm.utils.get_rp().arm_particles == 'On' else '0.0'", "!= 0.0: # a = node.rotation[1] # out = 'vec3({0}.x", "# Reference image name texpath = arm.utils.asset_path(filepath) texfile = arm.utils.extract_filename(filepath)", "node.outputs[5]: # Window return 'vec3(0.0)' # 'wvpposition' elif socket ==", "0.5'.format(co) elif grad == 'RADIAL': f = 'atan({0}.y, {0}.x) /", "return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'VALTORGB': # ColorRamp", "elif node.type == 'BSDF_VELVET': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0])", "parse_group_input(node, socket) elif node.type == 'ATTRIBUTE': # Pass time till", "+ ar2[1] else: co = ar[1][:-1] post = ')' curshader.write('float", "arm.assets.add(arm.utils.asset_path(s[0] + '.' 
+ s[1].lower())) else: arm.assets.add(arm.utils.asset_path(filepath)) # if image_format", "= parse_vector_input(node.inputs[3]) if sample_bump_res != '': if node.invert: ext =", "if curshader == tese: return parse_vector_input(node.inputs[1]) else: #space = node.space", "arm.utils.safesrc(name) def get_sdk_path(): return arm.utils.get_sdk_path() def disp_enabled(): return arm.utils.disp_enabled(arm.make_state.target) def", "- ({0}.y) * {2}, {0}.x * {2} + ({0}.y) *", "'_res' def write_result(l): global parsed res_var = res_var_name(l.from_node, l.from_socket) #", "parse_value_input(node.inputs[4]) res = 'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co, col1,", "{0} - 0.0002;'.format(out_opacity)) # Volume # parse_volume_input(node.inputs[1]) # Displacement if", "Pointiness return '0.0' elif node.type == 'HAIR_INFO': # Is Strand", "elems[i].position)) # Mix color # float f = (pos -", "op == 'CROSS_PRODUCT': return 'cross({0}, {1})'.format(vec1, vec2) elif op ==", "return parse_value_input(l.from_node.inputs[0]) res_var = write_result(l) st = l.from_socket.type if st", "assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') curshader.add_function(c_functions.str_tex_noise) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "points[i].location[0])) # Map vector return 'mix({0}[{1}], {0}[{1} + 1], ({2}", "assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') if", "rgb[1] = g[0] * t_inv + g[1] * t +", "if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0: out_emission = parse_value_input(node.inputs[6]) emission_found", "= 'mod({0}, {1})'.format(val1, val2) elif op == 'SINE': out_val =", "geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False): output_node = node_by_type(nodes,", "+ matname + '/' + image.name + ' - file", 
"sample_bump: write_bump(node, res) return res elif node.type == 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave)", "return parse_shader(l.from_node, l.from_socket) else: out_basecol = 'vec3(0.8)' out_roughness = '0.0'", "inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_vector_input(l.from_node.inputs[0])", "'QUADRATIC': f = '0.0' elif grad == 'EASING': f =", "'vec3(0.0)' # 'wvpposition' elif socket == node.outputs[6]: # Reflection return", "out_val = 'fract({0})'.format(val1) elif op == 'MODULO': # out_val =", "None: parse_output(output_node, con, vert, frag, geom, tesc, tese, parse_surface, parse_opacity,", "# Object Index curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex') return 'objectInfoIndex' elif socket", "curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0, 0.0);') curshader.write(f'if (texCoordBlend.x >", "= 'max({0}, {1})'.format(val1, val2) elif op == 'LESS_THAN': out_val =", "# Normal if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP': warn(mat_name() +", "socket == node.outputs[1]: # Is Shadow Ray return '0.0' elif", "i = 2 elif(t >= 1167.0): i = 1 else:", "= parse_vector_input(node.inputs[0]) else: co = 'bposition' col1 = parse_vector_input(node.inputs[1]) col2", "= r[0] * t_inv + r[1] * t + r[2]", "texture_store(node, tex, tex_name, to_linear=False, tex_link=None): global sample_bump global sample_bump_res global", "'CURVE_RGB': # RGB Curves fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1])", "+ '3b', vec + '.y', curves[3].points), vector_curve(name + '3c', vec", "== 'CAMERA': # View Z Depth if socket == node.outputs[1]:", "os.path.isfile(unpack_filepath): fmt = 'PNG' if new_ext == 'png' else 'JPEG'", "to linear if image_node.extension != 'REPEAT': # Extend or clip", "* {2})'.format(spec1, spec2, fac_var, fac_inv_var) out_emission = '({0} * {3}", "cameraProj)' # View Distance else: curshader.add_uniform('vec3 eye', 
link='_cameraPosition') return 'distance(eye,", ">= 6365.0): i = 5 elif(t >= 3315.0): i =", "may obtain a copy of the License at # #", "fac_inv_var, fac_var)) bc1, rough1, met1, occ1, spec1, opac1, emi1 =", "elif node.type == 'NEW_GEOMETRY': if socket == node.outputs[6]: # Backfacing", "dotnv = 'dotNV' if socket == node.outputs[0]: # Fresnel curshader.add_function(c_functions.str_fresnel)", "'vec3(0.0)' # 'vposition' elif socket == node.outputs[5]: # Window return", "1 if not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN matrix", "== 'MIX_RGB': fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac'", "1.0 - texCoord1.y, 0.0)' return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'", "col2, fac_var) # Revert to mix elif blend == 'COLOR':", "Copy non-ascii texture else: if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) !=", "0.0 or location[1] != 0.0 or location[2] != 0.0: out", "= True emission_strength = parse_value_input(node.inputs[1]) out_basecol = '({0} * {1})'.format(out_basecol,", "node.type == 'MIX_RGB': fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name) +", "if triplanar: curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1 = vec2(0.0);", "lays = mat_user.data.uv_layers # Second uvmap referenced if len(lays) >", "-1.06185848e-03, 3.11067539e+00], [3.37763626e+03, -4.34581697e-04, 1.64843306e+00], [4.10671449e+03, -8.61949938e-05, 6.41423749e-01], [4.66849800e+03, 2.85655028e-05,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket ==", "preferred? 
nor = parse_vector_input(node.inputs[0]) return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type", "Angular Velocity particle_info['angular_velocity'] = True return 'vec3(0.0)' elif node.type ==", "converted_path = os.path.join(unpack_path, tex['file']) # TODO: delete cache when file", "con = _con vert = _vert frag = _frag geom", "scale = parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res = 'vec3(tex_voronoi({0}", "image name texpath = arm.utils.asset_path(filepath) texfile = arm.utils.extract_filename(filepath) tex['file'] =", "on windows if arm.utils.get_os() == 'win': s = filepath.rsplit('.', 1)", "image.name + \".jpg\") arm.utils.convert_image(image, filepath, \"JPEG\") else: arm.log.warn(matname + '/'", "arm.material.mat_state as mat_state import arm.material.cycles_functions as c_functions import shutil emission_found", "fac_inv_var) out_occlusion = '({0} * {3} + {1} * {2})'.format(occ1,", "sample_bump: sample_bump_res = tex_store curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2,", "i, points[i].location[1])) # Get index fac_var = name + '_fac'", "def node_name(s): for p in parents: s = p.name +", "op == 'DOT_PRODUCT': return 'vec3(dot({0}, {1}))'.format(vec1, vec2) elif op ==", "co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic_f({0} *", "def get_arm_export_tangents(): return bpy.data.worlds['Arm'].arm_export_tangents def safesrc(name): return arm.utils.safesrc(name) def get_sdk_path():", "arm.utils.disp_enabled(arm.make_state.target) def warn(text): arm.log.warn(text) def assets_add(path): arm.assets.add(path) def assets_add_embedded_data(path): arm.assets.add_embedded_data(path)", "curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition'", "sample_bump: write_bump(node, res, 0.1) return res elif node.type == 'TEX_POINTDENSITY':", "elif op == 'FLOOR': out_val = 
'floor({0})'.format(val1) elif op ==", "col3, scale) if sample_bump: write_bump(node, res) return res elif node.type", "socket == node.outputs[3]: # Object return 'mposition' elif socket ==", "{1}, {2}) * vec3({4}, {5}, {6})) * {3})'.format(\\ vector_curve(name +", "fac_var) elif blend == 'SCREEN': out_col = '(vec3(1.0) - (vec3(1.0", "res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res) sample_bump_res", "cache size instead powimage = is_pow(image.size[0]) and is_pow(image.size[1]) if interpolation", "1.0: out = '({0} * vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1],", "co = 'bposition' col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) col3", "particle_info['location'] = True return 'p_location' if arm.utils.get_rp().arm_particles == 'On' else", "out_disp = parse_displacement_input(node.inputs[2]) curshader.write('vec3 disp = {0};'.format(out_disp)) def parse_group(node, socket):", "blackbody_table_g[i] b = blackbody_table_b[i] t_inv = 1.0 / t rgb[0]", "output_node.inputs[index] parents.append(node) out_group = parse_input(inp) parents.pop() return out_group def parse_group_input(node,", "+ r[2] rgb[1] = g[0] * t_inv + g[1] *", "= True return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else '0.0'", "== node.outputs[5]: # Parametric return 'mposition' elif node.type == 'HAIR_INFO':", "'REPEAT': # Extend or clip tex['u_addressing'] = 'clamp' tex['v_addressing'] =", "res def parse_input(inp): if inp.type == 'SHADER': return parse_shader_input(inp) elif", "= parse_value_input(node.inputs[0]) y = parse_value_input(node.inputs[1]) z = parse_value_input(node.inputs[2]) return 'vec3({0},", "bezier curve return '(vec3({0}, {1}, {2}) * {3})'.format(\\ vector_curve(name +", "!= None: strength = parse_value_input(strength_input) if strength != '1.0': frag.write('n.xy", "{1}, {2})'.format(r, g, b) elif node.type == 'WAVELENGTH': 
curshader.add_function(c_functions.str_wavelength_to_rgb) wl", "else '0.0' elif socket == node.outputs[4]: # Size particle_info['size'] =", "= vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2 = vec2(0.0);')", "h = parse_value_input(node.inputs[0]) s = parse_value_input(node.inputs[1]) v = parse_value_input(node.inputs[2]) return", "1.0;'.format(parse_vector_input(inp))) frag.write('texn.y = -texn.y;') frag.add_include('std/normals.glsl') frag.write('mat3 TBN = cotangentFrame(n, -vVec,", "== node.outputs[1]: return '{0}.y'.format(vec) elif socket == node.outputs[2]: return '{0}.z'.format(vec)", "met2, fac_var, fac_inv_var) out_occlusion = '({0} * {3} + {1}", "val2) out_val = 'mod({0}, {1})'.format(val1, val2) elif op == 'SINE':", "= True if parse_opacity: out_opacity = parse_value_input(node.inputs[1]) else: return parse_group(node,", "hasattr(mat_user.data, 'uv_layers'): lays = mat_user.data.uv_layers # Second uvmap referenced if", "{2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name)) else: curshader.write('vec4 {0} = texture({1},", "Ray return '1.0' elif socket == node.outputs[1]: # Is Shadow", "link='_objectInfoRandom') return 'objectInfoRandom' elif node.type == 'PARTICLE_INFO': if socket ==", "fac)) index = '0' for i in range(1, len(elems)): index", "elems[i].color[1], elems[i].color[2])) # Get index fac_var = node_name(node.name) + '_fac'", "4.70366907 rgb[1] = 0.0 rgb[2] = 0.0 else: if (t", "= node.from_instance con.add_elem('tex', 'short2norm') mat = mat_get_material() mat_users = mat_get_material_users()", "= normalize(TBN * n);') con.add_elem('tang', 'short4norm') frag.write_normal -= 1 def", "= 'bposition' scale = parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res", "else: # CELLS res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale) if", "spec2) out_emission = '({0} * 0.5 + {1} * 0.5)'.format(emi1,", "0.5))'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type", "col2, fac_var) elif blend == 
'DARKEN': out_col = 'min({0}, {1}", "write_result(l): global parsed res_var = res_var_name(l.from_node, l.from_socket) # Unparsed node", "out = 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1]) # if", "arm.utils.get_rp() if rpdat.arm_rp_displacement == 'Tessellation' and tese != None: curshader", "Revert to mix elif blend == 'SOFT_LIGHT': out_col = '((1.0", "bpy.data.worlds['Arm'].arm_export_tangents def safesrc(name): return arm.utils.safesrc(name) def get_sdk_path(): return arm.utils.get_sdk_path() def", "0) {tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;') curshader.write(f'if (texCoordBlend.y >", "'Anisotropic': interpolation = 'Smart' elif texfilter == 'Linear': interpolation =", "[-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01], [6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01] ] if", "'{0}[{1}]'.format(cols_var, index_var) else: # Linear # Write facs array facs_var", "return out_val elif node.type == 'RGBTOBW': col = parse_vector_input(node.inputs[0]) return", "< 965.0): rgb[0] = 4.70366907 rgb[1] = 0.0 rgb[2] =", "{2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) if location[0]", "con.add_elem('col', 'short4norm') # Vcols only for now return 'vcolor' elif", "== 'win': s = filepath.rsplit('.', 1) arm.assets.add(arm.utils.asset_path(s[0] + '.' 
+", "unpack_filepath, file_format=fmt) else: # Write bytes if size is different", "op == 'DIVIDE': out_val = '({0} / {1})'.format(val1, val2) elif", "'VECTOR': res = parse_vector(l.from_node, l.from_socket) if res == None: return", "= bpy.data.worlds['Arm'] # Surface if parse_surface or parse_opacity: parsed =", "parse_opacity=True, parse_displacement=True, basecol_only=False): output_node = node_by_type(nodes, 'OUTPUT_MATERIAL') if output_node !=", "= 'bposition' scale = parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) #", "'0.0' elif grad == 'SPHERICAL': f = 'max(1.0 - sqrt({0}.x", "val2) elif op == 'LESS_THAN': out_val = 'float({0} < {1})'.format(val1,", "if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_GRADIENT':", "len(points)): curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1])) # Get index fac_var", "normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res) sample_bump_res = '' else: res", "tese = _tese parse_surface = _parse_surface parse_opacity = _parse_opacity basecol_only", "parse_value_input(node.inputs[0]) y = parse_value_input(node.inputs[1]) z = parse_value_input(node.inputs[2]) return 'vec3({0}, {1},", "os.path.join(unpack_path, image.name + \".jpg\") arm.utils.convert_image(image, filepath, \"JPEG\") else: arm.log.warn(matname +", "= parse_vector_input(node.inputs[0]) elif node.type == 'BSDF_TOON': # write_normal(node.inputs[3]) pass elif", "- {2};'.format(prefix, fac_inv_var, fac_var)) bc1, rough1, met1, occ1, spec1, opac1,", "600 nanometers return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl) # Vector", "socket_index(node, socket) parent = parents.pop() # Leaving group inp =", "'RGBA' or st == 'VECTOR': return '{0}.x'.format(res_var) else: # VALUE", "return '((({0}.r * 0.3 + {0}.g * 0.59 + {0}.b", "or t == 'VECTOR': return 'vec3' else: return 'float' def", "# Tangent return 'wtangent' elif socket == node.outputs[3]: # True", "return 
arm.utils.disp_enabled(arm.make_state.target) def warn(text): arm.log.warn(text) def assets_add(path): arm.assets.add(path) def assets_add_embedded_data(path):", "= 'param_' + node_name(node.name) curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name)) return nn else:", "'MAPPING': out = parse_vector_input(node.inputs[0]) scale = node.inputs['Scale'].default_value rotation = node.inputs['Rotation'].default_value", "'0.0' elif socket == node.outputs[6]: # Is Transmission Ray return", "3315.0): i = 4 elif(t >= 1902.0): i = 3", "elif node.type == 'LIGHT_FALLOFF': # Constant, linear, quadratic # Shaders", "return bpy.data.worlds['Arm'].arm_export_tangents def safesrc(name): return arm.utils.safesrc(name) def get_sdk_path(): return arm.utils.get_sdk_path()", "elif node.type == 'BSDF_TRANSPARENT': if parse_opacity: out_opacity = '(1.0 -", "if hasattr(mat_user.data, 'uv_layers'): # No uvlayers for Curve lays =", "= is_pow(image.size[0]) and is_pow(image.size[1]) if interpolation == 'Cubic': # Mipmap", "* {3} + {1} * {2})'.format(emi1, emi2, fac_var, fac_inv_var) if", "blend = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else:", "node.type == 'HAIR_INFO': return 'vec3(0.0)' # Tangent Normal elif node.type", "met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1]) bc2, rough2, met2,", "{1})'.format(vec1, vec2) else: return '0.0' ## def vector_curve(name, fac, points):", "node.type == 'HAIR_INFO': # Is Strand # Intercept # Thickness", "os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material ' + matname + '/' + image.name +", "0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_2 = {1}{2} +", "out_basecol = parse_vector_input(node.inputs[0]) out_emission = '1.0' emission_found = True emission_strength", "'linear' tex['mag_filter'] = 'linear' tex['mipmap_filter'] = 'no' tex['generate_mipmaps'] = False", "return 
to_vec1(node.outputs[0].default_value) elif node.type == 'WIREFRAME': #node.use_pixel_size # size =", "sample_bump = True height = parse_value_input(node.inputs[2]) sample_bump = False nor", "'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256',", "-4.59745390e-06, 1.09090465e+00] ] blackbody_table_b = [ [0.0, 0.0, 0.0, 0.0],", "* t + b[1]) * t + b[2]) * t", "Mix color # float f = (pos - start) *", "or clip tex['u_addressing'] = 'clamp' tex['v_addressing'] = 'clamp' if image.source", "parse_vector_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE':", "= {0};'.format(out_disp)) def parse_group(node, socket): # Entering group index =", "= False particle_info['velocity'] = False particle_info['angular_velocity'] = False sample_bump =", "= 'floor({0} + 0.5)'.format(val1) elif op == 'FLOOR': out_val =", "Reflection return 'vec3(0.0)' elif node.type == 'UVMAP': #instance = node.from_instance", "elif socket == node.outputs[6]: # Angular Velocity particle_info['angular_velocity'] = True", "rgb[1] = 0.0 rgb[2] = 0.0 else: if (t >=", "{2};'.format(prefix, fac_inv_var, fac_var)) bc1, rough1, met1, occ1, spec1, opac1, emi1", "'wvpposition' elif socket == node.outputs[6]: # Reflection return 'vec3(0.0)' elif", "node.outputs[1]: # Facing return '(1.0 - pow({0}, ({1} < 0.5)", "{0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const for i in range(0,", "elif socket == node.outputs[1]: # Normal return 'n' if curshader.shader_type", "Vcols only for now return 'vcolor' else: # Vector con.add_elem('tex',", "* 0.5)'.format(emi1, emi2) if parse_opacity: out_opacity = '({0} * 0.5", "'1.0' elif node.type == 'VOLUME_ABSORPTION': pass elif node.type == 'VOLUME_SCATTER':", "op == 'TANGENT': out_val = 'tan({0})'.format(val1) elif op == 'ARCSINE':", "fac_var) elif blend == 'MULTIPLY': out_col = 
'mix({0}, {0} *", "sample_bump_res global parsed tex_store = store_var_name(node) if is_parsed(tex_store): return tex_store", "'JPEG' arm.utils.convert_image(image, unpack_filepath, file_format=fmt) else: # Write bytes if size", "= parse_shader_input(node.inputs[1]) if parse_surface: out_basecol = '({0} + {1})'.format(bc1, bc2)", "global sample_bump global sample_bump_res con = _con vert = _vert", "node_name(node.name) tex = make_texture(node, tex_name) tex_link = node.name if node.arm_material_param", "with open(unpack_filepath, 'wb') as f: f.write(image.packed_file.data) # Copy non-ascii texture", "assets_add_embedded_data(path): arm.assets.add_embedded_data(path) def mat_name(): return mat_state.material.name def mat_batch(): return mat_state.batch", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "out_val = 'pow({0}, {1})'.format(val1, val2) elif op == 'LOGARITHM': out_val", "= ',' + ar2[1] else: co = ar[1][:-1] post =", "'VECTOR': return res_var else: # VALUE return 'vec3({0})'.format(res_var) else: if", "# Camera return 'vec3(0.0)' # 'vposition' elif socket == node.outputs[5]:", "None if tex != None: curshader.write_textures += 1 to_linear =", "tex['format'] = image_format interpolation = image_node.interpolation rpdat = arm.utils.get_rp() texfilter", "0.0: out_emission = parse_value_input(node.inputs[6]) emission_found = True if parse_opacity: out_opacity", "{0} = {1};'.format(fac_var, fac)) col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2])", "texCoord.y, 0.0)' elif node.type == 'BUMP': # Interpolation strength strength", "os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) unpack_filepath =", "= parse_vector_input(node.inputs[3]) out_metallic = parse_value_input(node.inputs[4]) out_specular = parse_value_input(node.inputs[5]) # specular_tint", "res) return res elif node.type == 'TEX_NOISE': 
curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() +", "= 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res) sample_bump_res =", "node.type == 'BLACKBODY': t = float(parse_value_input(node.inputs[0])) rgb = [0,0,0] blackbody_table_r", "= 'tex_voronoi({0} * {1}).a'.format(co, scale) else: # CELLS res =", "= True mat_bind_texture(tex) con.add_elem('tex', 'short2norm') curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link) if node.inputs[0].is_linked:", "'0.0' elif socket == node.outputs[1]: # Age particle_info['age'] = True", "= parse_vector_input(node.inputs[0]) vec2 = parse_vector_input(node.inputs[1]) op = node.operation if op", "if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' col1", "{0}_{2}; float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2],", "'HAIR_INFO': # Is Strand # Intercept # Thickness return '0.5'", "{1}, {2}) * {3})'.format(\\ vector_curve(name + '0', vec + '.x',", "{2})'.format(x, y, z) elif node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0])", "to mix elif blend == 'BURN': out_col = 'mix({0}, {1},", "elif node.type == 'LIGHT_PATH': if socket == node.outputs[0]: # Is", "if mat_batch() and inp.is_uniform: return to_uniform(inp) else: return to_vec3(inp.default_value) def", "= False particle_info['angular_velocity'] = False sample_bump = False sample_bump_res =", "= node.vector_type #conv_from = node.convert_from #conv_to = node.convert_to # Pass", "def warn(text): arm.log.warn(text) def assets_add(path): arm.assets.add(path) def assets_add_embedded_data(path): arm.assets.add_embedded_data(path) def", "parse_vector_input(inp) elif inp.type == 'RGBA': return parse_vector_input(inp) elif inp.type ==", "'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif socket == node.outputs[3]: #", "y = parse_value_input(node.inputs[1]) z = parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, 
{2})'.format(x,", "Copyright 2011-2013 Blender Foundation # # Licensed under the Apache", "1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1]", "blackbody_table_r = [ [2.52432244e+03, -1.06185848e-03, 3.11067539e+00], [3.37763626e+03, -4.34581697e-04, 1.64843306e+00], [4.10671449e+03,", "* normalize(texn);') else: frag.write('vec3 n = ({0}) * 2.0 -", "Get filepath filepath = image.filepath if filepath == '': if", "parse_value_input(node.inputs[3]) # Metallic out_metallic = parse_value_input(node.inputs[4]) # Normal if node.inputs[5].is_linked", "def parse_normal_map_color_input(inp, strength_input=None): global normal_parsed global frag if basecol_only: return", "elif node.type == 'PARTICLE_INFO': if socket == node.outputs[3]: # Location", "out_col = parse_vector_input(node.inputs[0]) bright = parse_value_input(node.inputs[1]) contr = parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast)", "if rpdat.arm_rp_displacement == 'Tessellation' and tese != None: curshader =", "const for i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(ys_var, i,", "'bposition' elif socket == node.outputs[1]: # Normal return 'n' elif", "node.type == 'BSDF_DIFFUSE': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness", "+ '.' 
+ new_ext if image.packed_file is not None or", "arm.log.warn(text) def assets_add(path): arm.assets.add(path) def assets_add_embedded_data(path): arm.assets.add_embedded_data(path) def mat_name(): return", "out_opacity = '({0} * {3} + {1} * {2})'.format(opac1, opac2,", "parse_opacity: if len(node.inputs) > 20: out_opacity = parse_value_input(node.inputs[18]) elif node.type", "node.use_min: # out = 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1])", "return parse_vector_input(inp) elif inp.type == 'RGBA': return parse_vector_input(inp) elif inp.type", "# View Vector in camera space return 'vVecCam' elif node.type", "{1})'.format(val1, val2) elif op == 'DIVIDE': out_val = '({0} /", "or st == 'VECTOR': return '{0}.x'.format(res_var) else: # VALUE return", "mat_texture_grad(): curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name))", "{0}.y * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))", "for i in range(0, len(elems)): curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var,", "License. # import math import bpy import os import arm.assets", "- {0}), {1})'.format(blend, dotnv) elif socket == node.outputs[1]: # Facing", "Height multiplier # distance = parse_value_input(node.inputs[1]) sample_bump = True height", "= parse_vector(l.from_node, l.from_socket) if res == None: return None curshader.write('vec3", "'mod({0}, {1})'.format(val1, val2) elif op == 'SINE': out_val = 'sin({0})'.format(val1)", "def parse_vector_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type ==", "Is Singular Ray return '0.0' elif socket == node.outputs[5]: #", "Facing return '(1.0 - pow({0}, ({1} < 0.5) ? 
2.0", "fac = parse_value_input(node.inputs[0]) interp = node.color_ramp.interpolation elems = node.color_ramp.elements if", "else: return to_vec1(inp.default_value) def parse_value(node, socket): global particle_info global sample_bump", "inp.is_uniform: return to_uniform(inp) else: return to_vec1(inp.default_value) def parse_value(node, socket): global", "= store_var_name(node) + '_bump' # Testing.. get function parts.. ar", "vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1]) return out elif node.type ==", "wposition)' elif node.type == 'FRESNEL': curshader.add_function(c_functions.str_fresnel) ior = parse_value_input(node.inputs[0]) if", "* {3} + {1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var) out_roughness", "* {2}, {0}.x * {2} + {0}.z * {1}, 0.0)'.format(out,", "elif socket == node.outputs[1]: # Age particle_info['age'] = True return", "col2 = parse_vector_input(node.inputs[2]) col3 = parse_vector_input(node.inputs[3]) scale = parse_value_input(node.inputs[4]) res", "[-1.42546105e+03, -4.01730887e-05, 1.44002695e+00], [-1.18134453e+03, -2.18913373e-05, 1.30656109e+00], [-5.00279505e+02, -4.59745390e-06, 1.09090465e+00] ]", "blend == 'SCREEN': out_col = '(vec3(1.0) - (vec3(1.0 - {2})", "parse_value_input(node.inputs[0]) val2 = parse_value_input(node.inputs[1]) op = node.operation if op ==", "rotation, Z axis for now.. 
a = rotation[2] # x", "'{0}.x'.format(vec) elif socket == node.outputs[1]: return '{0}.y'.format(vec) elif socket ==", "'INVERT': fac = parse_value_input(node.inputs[0]) out_col = parse_vector_input(node.inputs[1]) return 'mix({0}, vec3(1.0)", "if parse_opacity: out_opacity = '({0} * {3} + {1} *", "= parse_value_input(node.inputs[1]) else: return parse_group(node, socket) elif node.type == 'GROUP_INPUT':", "Blender seems to load full images on size request, cache", "emission_found = False particle_info = {} particle_info['index'] = False particle_info['age']", "== node.outputs[5]: # Window return 'vec3(0.0)' # 'wvpposition' elif socket", "ANY KIND, either express or implied. # See the License", "== 'DISPLACEMENT': height = parse_value_input(node.inputs[0]) midlevel = parse_value_input(node.inputs[1]) scale =", "elif(t >= 1902.0): i = 3 elif(t >= 1449.0): i", "return '0.0' elif socket == node.outputs[2]: # Is Diffuse Ray", "# See the License for the specific language governing permissions", "== 'ABSOLUTE': out_val = 'abs({0})'.format(val1) elif op == 'MINIMUM': out_val", "- file not found(' + filepath + ')') return None", "texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;') curshader.write(f'if (texCoordBlend.y > 0) {tex_store} +=", "node.outputs[7]: # Ray Length return '0.0' elif socket == node.outputs[8]:", "'abs({0})'.format(val1) elif op == 'MINIMUM': out_val = 'min({0}, {1})'.format(val1, val2)", "- {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_HAIR': pass elif node.type ==", "'({0} * 0.5 + {1} * 0.5)'.format(rough1, rough2) out_metallic =", "{0}.z * {2}, {0}.y * {2} + {0}.z * {1},", "0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[1] != 0.0: # a", "return parse_group_input(node, socket) elif node.type == 'VERTEX_COLOR': con.add_elem('col', 'short4norm') #", "'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name) else: uv_name = 'texCoord' triplanar =", "== 'ATTRIBUTE': if socket == node.outputs[0]: # Color con.add_elem('col', 
'short4norm')", "return node_name(node.name) + '_' + safesrc(socket.name) + '_res' def write_result(l):", "{3}))'.format(out, scale[0], scale[1], scale[2]) if rotation[2] != 0.0: # ZYX", "- {1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac_var) elif blend", "= mat_get_material() mat_users = mat_get_material_users() if mat_users != None and", "0.5 + {1} * 0.5)'.format(rough1, rough2) out_metallic = '({0} *", "socket == node.outputs[1]: return '{0}.y'.format(vec) elif socket == node.outputs[2]: return", "{3} + {1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var) out_specular =", "# TODO: Khamake converts .PNG to .jpg? Convert ext to", "= parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(x, y, z) elif node.type", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "+= 1 to_linear = node.image != None and node.image.colorspace_settings.name ==", "{2}, {3}))'.format(out, node.max[0], node.max[1]) return out elif node.type == 'NORMAL':", "'0.0' elif node.type == 'OBJECT_INFO': if socket == node.outputs[2]: #", "node_name(node.name) curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name)) return nn else: return to_vec1(node.outputs[0].default_value) elif", "= parse_value_input(node.inputs[1]) z = parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(x, y,", "= textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name)) else: curshader.write('vec4 {0}", "implemented if node.attribute_name == 'time': curshader.add_uniform('float time', link='_time') return 'time'", "== node.outputs[0]: # Color con.add_elem('col', 'short4norm') # Vcols only for", "{0}'.format(tex_name), link=tex_link) if node.inputs[0].is_linked: uv_name = parse_vector_input(node.inputs[0]) uv_name = 'vec2({0}.x,", "[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [-2.02524603e-11, 1.79435860e-07,", "parse_value_input(l.from_node.inputs[0]) res_var = write_result(l) st = l.from_socket.type if st ==", "'mix({0}, abs({0} - 
{1}), {2})'.format(col1, col2, fac_var) elif blend ==", "= blackbody_table_g[i] b = blackbody_table_b[i] t_inv = 1.0 / t", "vert, frag, geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False): output_node", "os.makedirs(unpack_path) converted_path = os.path.join(unpack_path, tex['file']) # TODO: delete cache when", "1 : 0)'.format(fac_var, elems[i].position) # Write index index_var = node_name(node.name)", "[-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02], [-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01], [6.72595954e-13, -2.73059993e-08,", "'float' def to_uniform(inp): uname = safesrc(inp.node.name) + safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type) +", "and node.attribute_name == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0 -", "frag.write('vec3 texn = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) frag.write('texn.y =", "math.cos(a), math.sin(a)) if location[0] != 0.0 or location[1] != 0.0", "v = parse_value_input(node.inputs[2]) return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v) elif node.type ==", "texture unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path):", "'DISPLACEMENT': height = parse_value_input(node.inputs[0]) midlevel = parse_value_input(node.inputs[1]) scale = parse_value_input(node.inputs[2])", "{uv_name}.xy) * texCoordBlend.x;') curshader.write(f'if (texCoordBlend.y > 0) {tex_store} += texture({tex_name},", "= True # clearcoar_normal = parse_vector_input(node.inputs[20]) # tangent = parse_vector_input(node.inputs[21])", "tex, tex_name, to_linear=False, tex_link=tex_link)) else: global parsed tex_store = store_var_name(node)", "0.11) / 3.0) * 2.5)'.format(col) elif node.type == 'SEPHSV': return", "elif op == 'ABSOLUTE': out_val = 'abs({0})'.format(val1) elif op ==", "elif blend == 'DIFFERENCE': out_col = 'mix({0}, abs({0} - {1}),", "for now 
mat = mat_get_material() mat_users = mat_get_material_users() if mat_users", "else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_wave_f({0}", "socket == node.outputs[2]: # Object Index curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex') return", "* 0.5 + {1} * 0.5)'.format(opac1, opac2) elif node.type ==", "tex_store curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name))", "'VECT_TRANSFORM': #type = node.vector_type #conv_from = node.convert_from #conv_to = node.convert_to", "return 'bposition' elif socket == node.outputs[1]: # Normal return 'n'", "con.add_elem('col', 'short4norm') # Vcols only for now return 'vcolor' else:", "curshader.write('int {0} = {1};'.format(index_var, index)) # Linear # Write Xs", "parse_surface: frag.write('basecol = {0};'.format(out_basecol)) frag.write('roughness = {0};'.format(out_roughness)) frag.write('metallic = {0};'.format(out_metallic))", "'.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name", "= vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) curshader.write_textures -= 1 return '{0}.rgb'.format(tex_store)", "node.min[1]) # if node.use_max: # out = 'min({0}, vec3({1}, {2},", "'point' tex['mag_filter'] = 'point' # else defaults to linear if", "* 3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength)) curshader.write('vec3 {0}_a", "curshader.write('float {0}_3 = {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co,", "= parse_vector_input(node.inputs[1]) # subsurface_radius = parse_vector_input(node.inputs[2]) # subsurface_color = parse_vector_input(node.inputs[3])", "+ b[1]) * t + b[2]) * t + b[3]", "now return 'vcolor' elif node.type == 'ATTRIBUTE': if socket ==", "return '0.0' elif socket == node.outputs[9]: # Transparent Depth return", "* 0.5 + {1} * 0.5)'.format(emi1, emi2) if parse_opacity: out_opacity", "else: res = 'n' return res elif node.type == 
'MAPPING':", "op == 'ARCTAN2': out_val = 'atan({0}, {1})'.format(val1, val2) if node.use_clamp:", "global parents global normal_parsed global curshader # Active shader -", "write_normal(node.inputs[4]) out_basecol = parse_vector_input(node.inputs[0]) elif node.type == 'BSDF_TOON': # write_normal(node.inputs[3])", "scale = parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res = 'tex_voronoi({0}", "elif inp.type == 'VECTOR': return parse_vector_input(inp) elif inp.type == 'VALUE':", "= parse_value(l.from_node, l.from_socket) if res == None: return None curshader.write('float", "size = parse_value_input(node.inputs[0]) return '0.0' elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick)", "# Displacement if _parse_displacement and disp_enabled() and node.inputs[2].is_linked: parsed =", "parents.append(parent) # Return to group return res def parse_input(inp): if", "to .jpg? Convert ext to lowercase on windows if arm.utils.get_os()", "= textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_3 =", "interpolation = 'Linear' elif texfilter == 'Point': interpolation = 'Closest'", "as # Copyright 2011-2013 Blender Foundation # # Licensed under", "'' return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link)) else: tex_store =", "\".jpg\") arm.utils.convert_image(image, filepath, \"JPEG\") else: arm.log.warn(matname + '/' + image.name", "elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "elif node.type == 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type ==", "= [ [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],", "y * cos(theta) out = 'vec3({0}.x * {1} - ({0}.y)", "= 'tex_voronoi({0} * {1}).rgb'.format(co, scale) if sample_bump: write_bump(node, res) return", "return None # Get filepath filepath = image.filepath if filepath", "vec = 
parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.x'.format(vec) elif", "val2) elif op == 'SINE': out_val = 'sin({0})'.format(val1) elif op", "= parse_vector_input(node.inputs[3]) return '(vec3({0}) * {1})'.format(height, scale) def parse_normal_map_color_input(inp, strength_input=None):", "_parse_displacement and disp_enabled() and node.inputs[2].is_linked: parsed = {} parents =", "if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP': warn(mat_name() + ' -", "texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;') curshader.write(f'if (texCoordBlend.z > 0) {tex_store} +=", "res = 'tex_voronoi({0} * {1}).a'.format(co, scale) else: # CELLS res", "965.0): rgb[0] = 4.70366907 rgb[1] = 0.0 rgb[2] = 0.0", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "nodes: if n.type == ntype: return n def socket_index(node, socket):", "2.89727618e-05, 1.48001316e-01], [3.78765709e+03, 9.36026367e-06, 3.98995841e-01] ] blackbody_table_g = [ [-7.50343014e+02,", "'CEIL': out_val = 'ceil({0})'.format(val1) elif op == 'FRACT': out_val =", "'short4norm') frag.write_normal -= 1 def parse_value_input(inp): if inp.is_linked: l =", "= -texn.y;') frag.add_include('std/normals.glsl') frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);') frag.write('n", "elif node.type == 'UVMAP': #instance = node.from_instance con.add_elem('tex', 'short2norm') mat", ".PNG to .jpg? 
Convert ext to lowercase on windows if", ".jpg to /unpacked filepath += '.raw' elif image.source == \"GENERATED\":", "b[3] # Pass constant return to_vec3([rgb[0], rgb[1], rgb[2]]) elif node.type", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# Fresnel curshader.add_function(c_functions.str_fresnel) return 'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend,", "== 'LIGHT_FALLOFF': # Constant, linear, quadratic # Shaders default to", "node.outputs[0]: return to_vec3(node.outputs[0].default_value) elif socket == node.outputs[1]: # TODO: is", "else: return out_col elif node.type == 'BLACKBODY': t = float(parse_value_input(node.inputs[0]))", "== 'COLOR': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) #", "node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2 = parse_vector_input(node.inputs[1]) op", "* {0}.y + {0}.z * {0}.z), 0.0)'.format(co) res = 'vec3(clamp({0},", "0.5 + {1} * 0.5)'.format(spec1, spec2) out_emission = '({0} *", "'clamp' if image.source == 'MOVIE': tex['source'] = 'movie' tex['min_filter'] =", "'({0} * {3} + {1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var)", "{2}, {3})'.format(co, col1, col2, scale) if sample_bump: write_bump(node, res) return", "Color parse_normal_map_color_input(node.inputs[1], node.inputs[0]) return None elif node.type == 'VECT_TRANSFORM': #type", "path to assets # TODO: Khamake converts .PNG to .jpg?", "Is Shadow Ray return '0.0' elif socket == node.outputs[2]: #", "{0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name)) else: curshader.write('vec4", "elif blend == 'MULTIPLY': out_col = 'mix({0}, {0} * {1},", "elif node.type == 'BSDF_ANISOTROPIC': if parse_surface: write_normal(node.inputs[4]) # Revert to", "'WIREFRAME': #node.use_pixel_size # size = parse_value_input(node.inputs[0]) return '0.0' elif node.type", "col2, fac_var) elif blend == 'MULTIPLY': out_col = 'mix({0}, {0}", "== 'VECTOR': return parse_vector_input(inp) elif inp.type == 'VALUE': 
return parse_value_input(inp)", "[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0,", "= parse_value_input(node.inputs[3]) res = 'tex_noise({0} * {1})'.format(co, scale) if sample_bump:", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "return '{0}.y'.format(vec) elif socket == node.outputs[2]: return '{0}.z'.format(vec) elif node.type", "+= 1 if not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN", "- texCoord.y, 0.0)' elif socket == node.outputs[3]: # Object return", "parse_value_input(node.inputs[1]) scale = parse_value_input(node.inputs[2]) nor = parse_vector_input(node.inputs[3]) return '(vec3({0}) *", "out_opacity, out_emission = parse_shader_input(node.inputs[0]) if parse_surface: frag.write('basecol = {0};'.format(out_basecol)) frag.write('roughness", "post, scl)) sample_bump = False def to_vec1(v): return str(v) def", "fac = parse_value_input(node.inputs[0]) out_col = parse_vector_input(node.inputs[1]) return 'mix({0}, vec3(1.0) -", "fac_var) # Revert to mix elif blend == 'COLOR': out_col", "return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat, val, fac)", "socket): global emission_found out_basecol = 'vec3(0.8)' out_roughness = '0.0' out_metallic", "if socket == node.outputs[6]: # Backfacing return '(1.0 - float(gl_FrontFacing))'", "* 0.5'.format(co) elif grad == 'RADIAL': f = 'atan({0}.y, {0}.x)", "'VALUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert", "{4}){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float {0}_3 = {1}{2} +", "elif node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2 = parse_vector_input(node.inputs[1])", "= 'bposition' grad = node.gradient_type if grad == 'LINEAR': f", "elif node.type == 'COMBRGB': r = parse_value_input(node.inputs[0]) g = parse_value_input(node.inputs[1])", "PBR'): # Displacement if socket == node.outputs[1]: return parse_value_input(node.inputs[7]) 
else:", "= {} tex['name'] = tex_name tex['file'] = '' return '{0}.a'.format(texture_store(node,", "if socket == node.outputs[0]: # Generated - bounds return 'bposition'", "'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' return 'fresnel({0}, {1})'.format(ior, dotnv)", "# you may not use this file except in compliance", "image name') return None ext = s[1].lower() do_convert = ext", "'0', vec + '.x', curves[0].points), vector_curve(name + '1', vec +", "return 'p_age' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket", "b) elif node.type == 'WAVELENGTH': curshader.add_function(c_functions.str_wavelength_to_rgb) wl = parse_value_input(node.inputs[0]) #", "out = parse_vector_input(node.inputs[0]) scale = node.inputs['Scale'].default_value rotation = node.inputs['Rotation'].default_value location", "'.z', curves[2].points), fac) elif node.type == 'CURVE_RGB': # RGB Curves", "elif node.type == 'ATTRIBUTE': # Pass time till drivers are", "tese != None: curshader = tese else: curshader = vert", "throuh return parse_vector_input(node.inputs[0]) elif node.type == 'COMBXYZ': x = parse_value_input(node.inputs[0])", "res elif node.type == 'TEX_NOISE': curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() + '/armory/Assets/' +", "yet if image.packed_file is not None: if not os.path.isfile(unpack_filepath) or", "'MINIMUM': out_val = 'min({0}, {1})'.format(val1, val2) elif op == 'MAXIMUM':", "to mix elif blend == 'HUE': out_col = 'mix({0}, {1},", "return '(1.0 - float(gl_FrontFacing))' elif socket == node.outputs[7]: # Pointiness", "= arm.utils.get_rp() texfilter = rpdat.arm_texture_filter if texfilter == 'Anisotropic': interpolation", "0.3 + {0}.g * 0.59 + {0}.b * 0.11) /", "'CAMERA': # View Z Depth if socket == node.outputs[1]: curshader.add_include('std/math.glsl')", "elif node.type == 'GAMMA': out_col = parse_vector_input(node.inputs[0]) gamma = parse_value_input(node.inputs[1])", "'MIX_RGB': fac = 
parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' curshader.write('float", "frag for surface / tese for displacement global con global", "particle_info['velocity'] = False particle_info['angular_velocity'] = False sample_bump = False sample_bump_res", "tex_noise({0} * {1} + 0.33), tex_noise({0} * {1} + 0.66))'.format(co,", "tex_name image = image_node.image if matname is None: matname =", "def assets_add(path): arm.assets.add(path) def assets_add_embedded_data(path): arm.assets.add_embedded_data(path) def mat_name(): return mat_state.material.name", "vec2) elif op == 'NORMALIZE': return 'normalize({0})'.format(vec1) elif node.type ==", "is_pow(image.size[1]) if interpolation == 'Cubic': # Mipmap linear tex['mipmap_filter'] =", "= parse_vector_input(node.inputs[2]) col3 = parse_vector_input(node.inputs[3]) scale = parse_value_input(node.inputs[4]) res =", "= node.name if node.arm_material_param else None if tex != None:", "parse_value_input(node.inputs[1]) res = 'tex_magic_f({0} * {1} * 4.0)'.format(co, scale) if", "== 'MIX_SHADER': prefix = '' if node.inputs[0].is_linked else 'const '", "{1}).a)'.format(co, scale) else: # CELLS res = 'tex_voronoi({0} * {1}).rgb'.format(co,", "# Is Reflection Ray return '0.0' elif socket == node.outputs[6]:", "curshader.write_textures > 0: s += '_texread' s = safesrc(s) if", "1: arm.log.warn(matname + '/' + image.name + ' - file", "'0.0' elif socket == node.outputs[2]: # Lifetime particle_info['lifetime'] = True", "curshader.add_function(c_functions.str_wavelength_to_rgb) wl = parse_value_input(node.inputs[0]) # Roughly map to cycles -", "if (t >= 12000): rgb[0] = 0.826270103 rgb[1] = 0.994478524", "tex['mag_filter'] = 'point' # else defaults to linear if image_node.extension", "'DIFFERENCE': out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2, fac_var)", "* {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) #", "parse_vector_input(node.inputs[6]) out_roughness = 
parse_value_input(node.inputs[7]) # aniso = parse_vector_input(node.inputs[8]) # aniso_rot", "aniso = parse_vector_input(node.inputs[8]) # aniso_rot = parse_vector_input(node.inputs[9]) # sheen =", "= node.operation if op == 'DOT_PRODUCT': return 'dot({0}, {1})'.format(vec1, vec2)", "'ATTRIBUTE': # Pass time till drivers are implemented if node.attribute_name", "= '({0} - {1})'.format(val1, val2) elif op == 'MULTIPLY': out_val", "# Revert to mix elif blend == 'SATURATION': out_col =", "== 'sRGB' res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link)) curshader.write_textures", "'1', '4', '3'] curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2}; float", "and tese != None: curshader = tese else: curshader =", "Unparsed node if not is_parsed(res_var): parsed[res_var] = True st =", "{0}.z), 0.0)'.format(co) res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node,", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "= parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'vec3(tex_musgrave_f({0} *", "# Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type ==", "size instead powimage = is_pow(image.size[0]) and is_pow(image.size[1]) if interpolation ==", "elif node.type == 'HAIR_INFO': return 'vec3(0.0)' # Tangent Normal elif", "def write_bump(node, res, scl=0.001): global sample_bump global sample_bump_res sample_bump_res =", "st == 'RGBA' or st == 'VECTOR': return '{0}.x'.format(res_var) else:", "_ are reserved s = s.replace('_', '_x') return s ##", "Window return 'vec3(0.0)' # 'wvpposition' elif socket == node.outputs[6]: #", "{2})'.format(v[0], v[1], v[2]) def node_by_type(nodes, ntype): for n in nodes:", "out_metallic = '1.0' elif node.type == 'AMBIENT_OCCLUSION': if parse_surface: #", "!= 0 def is_ascii(s): return len(s) == len(s.encode()) ## def", "'BSDF_VELVET': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) 
out_roughness = '1.0'", "'RGBTOBW': col = parse_vector_input(node.inputs[0]) return '((({0}.r * 0.3 + {0}.g", "{1})'.format(val1, val2) elif op == 'MULTIPLY': out_val = '({0} *", "range(1, len(elems)): index += ' + ({0} > {1} ?", "group index = socket_index(node, socket) output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT') if", "'tex_wave_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res) return res", "{1})'.format(vec1, vec2) elif op == 'AVERAGE': return '(({0} + {1})", "'On' else '0.0' elif socket == node.outputs[4]: # Size particle_info['size']", "'vec3({0}, {1}, {2})'.format(r, g, b) elif node.type == 'WAVELENGTH': curshader.add_function(c_functions.str_wavelength_to_rgb)", "Thickness return '0.5' elif node.type == 'LAYER_WEIGHT': blend = parse_value_input(node.inputs[0])", "parse_output(output_node, con, vert, frag, geom, tesc, tese, parse_surface, parse_opacity, parse_displacement,", "parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res = 'tex_voronoi({0} * {1}).a'.format(co,", "post, scl)) curshader.write('float {0}_3 = {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res,", "'BSDF_ANISOTROPIC': if parse_surface: write_normal(node.inputs[4]) # Revert to glossy out_basecol =", "def parse_output(node, _con, _vert, _frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity,", "if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = '1.0' out_metallic", "scale) else: # CELLS res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale)", "'({0} + {1})'.format(val1, val2) elif op == 'SUBTRACT': out_val =", "if not has_ext: # Raw bytes, write converted .jpg to", "node.type == 'INVERT': fac = parse_value_input(node.inputs[0]) out_col = parse_vector_input(node.inputs[1]) return", "'DIAGONAL': f = '({0}.x + {0}.y) * 0.5'.format(co) elif grad", "0.5 + {1} * 0.5)'.format(emi1, emi2) if parse_opacity: out_opacity =", "node_name(s): for p in parents: s = p.name + '_'", "= 
parse_vector_input(node.inputs[0]) else: co = 'bposition' grad = node.gradient_type if", "+ new_ext if image.packed_file is not None or not is_ascii(texfile):", "== node.outputs[2]: return '{0}.b'.format(col) elif node.type == 'SEPXYZ': vec =", "> 0) {tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;') curshader.write(f'if (texCoordBlend.z", "elif node.type == 'WAVELENGTH': curshader.add_function(c_functions.str_wavelength_to_rgb) wl = parse_value_input(node.inputs[0]) # Roughly", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "tex_link=tex_link)) else: tex_store = store_var_name(node) # Pink color for missing", "== 'CROSS_PRODUCT': return 'cross({0}, {1})'.format(vec1, vec2) elif op == 'NORMALIZE':", "surface / tese for displacement global con global vert global", "'DOT_PRODUCT': return 'dot({0}, {1})'.format(vec1, vec2) else: return '0.0' ## def", "Linear # Write Xs array facs_var = name + '_xs'", "frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);') frag.write('n = TBN *", "parse_vector_input(node.inputs[0]) return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'VALTORGB': #", "n = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) if strength_input !=", "(finish - start)) return 'mix({0}[{1}], {0}[{1} + 1], ({2} -", "= parse_value_input(node.inputs[0]) g = parse_value_input(node.inputs[1]) b = parse_value_input(node.inputs[2]) return 'vec3({0},", "!= 0.0: out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found = True # clearcoar_normal", "{3}, 1.0-{4}))'.format(col, hue, sat, val, fac) elif node.type == 'INVERT':", "Apache License, Version 2.0 (the \"License\"); # you may not", "in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0])) # Map", "socket) output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT') if output_node == None: return", "Vector elif node.type == 'CAMERA': # View 
Vector in camera", "return 'objectInfoRandom' elif node.type == 'PARTICLE_INFO': if socket == node.outputs[0]:", "= parse_value_input(node.inputs[0]) # Roughly map to cycles - 450 to", "pass elif node.type == 'HOLDOUT': if parse_surface: # Occlude out_occlusion", "{2};'.format(facs_var, i, elems[i].position)) # Mix color # float f =", "# 'wvpposition' elif socket == node.outputs[6]: # Reflection return 'vec3(0.0)'", "scl)) curshader.write('float {0}_2 = {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre,", "work licensed as # Copyright 2011-2013 Blender Foundation # #", "node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition' grad =", "elif op == 'NORMALIZE': return 'normalize({0})'.format(vec1) elif node.type == 'DISPLACEMENT':", "Active shader - frag for surface / tese for displacement", "uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name) else: uv_name = 'texCoord'", "0.0, 1.0, 1.0);'.format(tex_store)) curshader.write_textures -= 1 return '{0}.rgb'.format(tex_store) elif node.type", "arm.assets import arm.utils import arm.make_state import arm.log import arm.material.mat_state as", "index_var = node_name(node.name) + '_i' curshader.write('int {0} = {1};'.format(index_var, index))", "'linear' tex['generate_mipmaps'] = True elif interpolation == 'Closest': tex['min_filter'] =", "f = (pos - start) * (1.0 / (finish -", "[4.10671449e+03, -8.61949938e-05, 6.41423749e-01], [4.66849800e+03, 2.85655028e-05, 1.29075375e-01], [4.60124770e+03, 2.89727618e-05, 1.48001316e-01], [3.78765709e+03,", "parse_opacity: out_opacity = '({0} * {3} + {1} * {2})'.format(opac1,", "Intercept # Thickness return '0.5' elif node.type == 'LAYER_WEIGHT': blend", "!= None: curshader.write_textures += 1 to_linear = node.image != None", "through return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_SKY': #", "'LIGHT_FALLOFF': # Constant, linear, quadratic # Shaders default to quadratic", "node.node_tree.name.startswith('Armory PBR'): if parse_surface: 
# Base color out_basecol = parse_vector_input(node.inputs[0])", "{1})'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res elif", "{1})))'.format(dotnv, blend) elif node.type == 'LIGHT_PATH': if socket == node.outputs[0]:", "return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif node.type == 'RGB':", "socket == node.outputs[4]: # Is Singular Ray return '0.0' elif", "Pink color for missing texture curshader.write('vec4 {0} = vec4(1.0, 0.0,", "node.outputs[0]: # Fresnel curshader.add_function(c_functions.str_fresnel) return 'fresnel(1.0 / (1.0 - {0}),", "link='{0}'.format(node.name)) return nn else: return to_vec1(node.outputs[0].default_value) elif node.type == 'WIREFRAME':", "nor = parse_vector_input(node.inputs[0]) return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type ==", "'{0}.z'.format(vec) elif node.type == 'VECT_MATH': vec1 = parse_vector_input(node.inputs[0]) vec2 =", "== 'SEPHSV': return '0.0' elif node.type == 'SEPRGB': col =", "vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1',", "= name + '_xs' curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO: Make", "res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link)) curshader.write_textures -= 1", "elif blend == 'VALUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2,", "res elif node.type == 'MAPPING': out = parse_vector_input(node.inputs[0]) scale =", "'acos({0})'.format(val1) elif op == 'ARCTANGENT': out_val = 'atan({0})'.format(val1) elif op", "unpack_filepath = os.path.join(unpack_path, tex['file']) if do_convert: if not os.path.isfile(unpack_filepath): fmt", "node if not is_parsed(res_var): parsed[res_var] = True st = l.from_socket.type", "col2, fac_var) # Revert to mix elif blend == 'SATURATION':", "+ {1} * 0.5)'.format(met1, met2) out_occlusion = '({0} * 0.5", "st == 'VALUE': res = parse_value(l.from_node, l.from_socket) if res ==", "tex, tex_name, 
tex_link=tex_link)) curshader.write_textures -= 1 return res elif node.image", "is_parsed(tex_store): return tex_store parsed[tex_store] = True mat_bind_texture(tex) con.add_elem('tex', 'short2norm') curshader.add_uniform('sampler2D", "if interpolation == 'Cubic': # Mipmap linear tex['mipmap_filter'] = 'linear'", "subsurface_radius = parse_vector_input(node.inputs[2]) # subsurface_color = parse_vector_input(node.inputs[3]) out_metallic = parse_value_input(node.inputs[4])", "# Occlusion out_occlusion = parse_value_input(node.inputs[2]) # Roughness out_roughness = parse_value_input(node.inputs[3])", "i = 1 else: i = 0 r = blackbody_table_r[i]", "{0}.z * {2}, {0}.x * {2} + {0}.z * {1},", "mapping.curves[0].points[0].handle_type # bezier curve return '(vec3({0}, {1}, {2}) * {3})'.format(\\", "curves[3].points)) elif node.type == 'COMBHSV': curshader.add_function(c_functions.str_hue_sat) h = parse_value_input(node.inputs[0]) s", "= [] normal_parsed = False curshader = frag out_basecol, out_roughness,", "i, elems[i].color[0], elems[i].color[1], elems[i].color[2])) # Get index fac_var = node_name(node.name)", "or scale[1] != 1.0 or scale[2] != 1.0: out =", "else '0.0' elif socket == node.outputs[2]: # Lifetime particle_info['lifetime'] =", "= blackbody_table_r[i] g = blackbody_table_g[i] b = blackbody_table_b[i] t_inv =", "'vcolor' elif node.type == 'ATTRIBUTE': if socket == node.outputs[0]: #", "= node.from_instance if socket == node.outputs[0]: # Generated - bounds", "parse_vector_input(node.inputs[1]) op = node.operation if op == 'DOT_PRODUCT': return 'dot({0},", "* {0}.z), 0.0)'.format(co) res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump:", "== 'REROUTE': return parse_value_input(l.from_node.inputs[0]) res_var = write_result(l) st = l.from_socket.type", "# Revert to glossy out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1])", "co = 'bposition' scale = parse_value_input(node.inputs[3]) res = 'tex_checker_f({0}, 
{1})'.format(co,", "== 'RGBA' or st == 'VECTOR': res = parse_vector(l.from_node, l.from_socket)", "elif node.type == 'SEPXYZ': vec = parse_vector_input(node.inputs[0]) if socket ==", "* {1})'.format(out_basecol, emission_strength) elif node.type == 'BSDF_GLASS': if parse_surface: write_normal(node.inputs[3])", "parse_value_input(node.inputs[6]) emission_found = True if parse_opacity: out_opacity = parse_value_input(node.inputs[1]) else:", "node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): # Displacement if socket", "inp.links[0] if l.from_node.type == 'REROUTE': return parse_shader_input(l.from_node.inputs[0]) return parse_shader(l.from_node, l.from_socket)", "{0};'.format(out_basecol)) frag.write('roughness = {0};'.format(out_roughness)) frag.write('metallic = {0};'.format(out_metallic)) frag.write('occlusion = {0};'.format(out_occlusion))", "fac_var) # Revert to mix elif blend == 'BURN': out_col", "socket == node.outputs[10]: # Transmission Depth return '0.0' elif node.type", "return '1.0' elif node.type == 'VALUE': if node.arm_material_param: nn =", "3.98995841e-01] ] blackbody_table_g = [ [-7.50343014e+02, 3.15679613e-04, 4.73464526e-01], [-1.00402363e+03, 1.29189794e-04,", "pre, co, post, scl)) curshader.write('float {0}_4 = {1}{2} + vec3(0.0,", "True return 'p_index' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif", "tex_name, uv_name)) if sample_bump: sample_bump_res = tex_store curshader.write('float {0}_1 =", "'ARCCOSINE': out_val = 'acos({0})'.format(val1) elif op == 'ARCTANGENT': out_val =", "= _basecol_only emission_found = False particle_info = {} particle_info['index'] =", "uv_name)) curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name))", "elif node.type == 'VOLUME_ABSORPTION': pass elif node.type == 'VOLUME_SCATTER': pass", "write_normal(node.inputs[3]) pass elif node.type == 'BSDF_TRANSLUCENT': if parse_surface: write_normal(node.inputs[1]) if", "'GAMMA': 
out_col = parse_vector_input(node.inputs[0]) gamma = parse_value_input(node.inputs[1]) return 'pow({0}, vec3({1}))'.format(out_col,", "return res elif node.type == 'LIGHT_FALLOFF': # Constant, linear, quadratic", "if sample_bump: write_bump(node, res, 0.1) return res elif node.type ==", "TODO: is parse_value path preferred? nor = parse_vector_input(node.inputs[0]) return 'vec3(dot({0},", "in compliance with the License. # You may obtain a", "return res_var else: # VALUE return 'vec3({0})'.format(res_var) else: if inp.type", "'{0}.b'.format(col) elif node.type == 'SEPXYZ': vec = parse_vector_input(node.inputs[0]) if socket", "detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) res = 'tex_noise({0}", "* {2})'.format(col1, col2, fac_var) elif blend == 'LIGHTEN': out_col =", "and num != 0 def is_ascii(s): return len(s) == len(s.encode())", "'BSDF_PRINCIPLED': if parse_surface: write_normal(node.inputs[19]) out_basecol = parse_vector_input(node.inputs[0]) # subsurface =", "== node.outputs[0]: return '{0}.x'.format(vec) elif socket == node.outputs[1]: return '{0}.y'.format(vec)", "arm.assets.add(unpack_filepath) else: if not os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material ' + matname +", "0.0, 0.0, 0.0], [-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02], [-2.22463426e-13, -1.55078698e-08, 3.81675160e-04,", "as f: f.write(image.packed_file.data) # Copy non-ascii texture else: if not", "unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path)", "safesrc(socket.name) + '_res' def write_result(l): global parsed res_var = res_var_name(l.from_node,", "rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[2]) if parse_surface:", "{1}, {2})'.format(col1, col2, fac_var) # Revert to mix elif blend", "node.outputs[3]: # True Normal return 'n' if curshader.shader_type == 'frag'", "== tese: return 
parse_vector_input(node.inputs[1]) else: #space = node.space #map =", "if node.type == 'GROUP': return parse_group(node, socket) elif node.type ==", "res_var else: if mat_batch() and inp.is_uniform: return to_uniform(inp) else: return", "'.' + new_ext if image.packed_file is not None or not", "out_col = '((1.0 - {2}) * {0} + {2} *", "* {3} + {1} * {2})'.format(met1, met2, fac_var, fac_inv_var) out_occlusion", "= ['2', '1', '4', '3'] curshader.write('float {0}_fh1 = {0}_{1} -", "'({0} / {1})'.format(val1, val2) elif op == 'POWER': out_val =", "fac_var) elif blend == 'DIFFERENCE': out_col = 'mix({0}, abs({0} -", "0 r = blackbody_table_r[i] g = blackbody_table_g[i] b = blackbody_table_b[i]", "Curves fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves = node.mapping.curves", "unpack_filepath) arm.assets.add(unpack_filepath) else: if not os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material ' + matname", "'sqrt({0})'.format(val1) elif op == 'ABSOLUTE': out_val = 'abs({0})'.format(val1) elif op", "write_bump(node, res, 0.1) return res elif node.type == 'TEX_POINTDENSITY': #", "= '1.0' emission_found = True emission_strength = parse_value_input(node.inputs[1]) out_basecol =", "Curve lays = mat_user.data.uv_layers # Second uvmap referenced if len(lays)", "parse_vector_input(node.inputs[0]) out_roughness = '1.0' out_metallic = '1.0' elif node.type ==", "out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_shader(node, socket): global", "with the License. 
# You may obtain a copy of", "= os.path.join(unpack_path, image.name + \".jpg\") arm.utils.convert_image(image, filepath, \"JPEG\") else: arm.log.warn(matname", "= 'clamp' if image.source == 'MOVIE': tex['source'] = 'movie' tex['min_filter']", "arm.assets.add(converted_path) else: # Link image path to assets # TODO:", "spec1, opac1, emi1 = parse_shader_input(node.inputs[1]) bc2, rough2, met2, occ2, spec2,", "Ray return '1.0' elif socket == node.outputs[3]: # Is Glossy", "does not exist yet if image.packed_file is not None: if", "fac_var, fac_inv_var) if parse_opacity: out_opacity = '({0} * {3} +", "== node.outputs[7]: # Pointiness return '0.0' elif node.type == 'HAIR_INFO':", "#obj = node.object #instance = node.from_instance if socket == node.outputs[0]:", "> 20: out_opacity = parse_value_input(node.inputs[18]) elif node.type == 'BSDF_DIFFUSE': if", "'dotNV' if socket == node.outputs[0]: # Fresnel curshader.add_function(c_functions.str_fresnel) return 'fresnel(1.0", "{1})'.format(val1, val2) elif op == 'GREATER_THAN': out_val = 'float({0} >", "out_opacity, out_emission def parse_shader(node, socket): global emission_found out_basecol = 'vec3(0.8)'", "windows if arm.utils.get_os() == 'win': s = filepath.rsplit('.', 1) arm.assets.add(arm.utils.asset_path(s[0]", "* t + b[2]) * t + b[3] # Pass", "= parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves = node.mapping.curves name =", "# size = parse_value_input(node.inputs[0]) return '0.0' elif node.type == 'TEX_BRICK':", "== 'Closest': tex['min_filter'] = 'point' tex['mag_filter'] = 'point' # else", "except in compliance with the License. 
# You may obtain", "6365.0): i = 5 elif(t >= 3315.0): i = 4", "2.5)'.format(col) elif node.type == 'SEPHSV': return '0.0' elif node.type ==", "node.min[0], node.min[1]) # if node.use_max: # out = 'min({0}, vec3({1},", "specular_tint = parse_vector_input(node.inputs[6]) out_roughness = parse_value_input(node.inputs[7]) # aniso = parse_vector_input(node.inputs[8])", "Z Depth if socket == node.outputs[1]: curshader.add_include('std/math.glsl') curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj')", "== node.outputs[6]: # Backfacing return '(1.0 - float(gl_FrontFacing))' elif socket", "Pink color for missing texture parsed[tex_store] = True curshader.write_textures +=", "sample_bump: write_bump(node, res) return res elif node.type == 'TEX_ENVIRONMENT': #", "'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) unpack_filepath = os.path.join(unpack_path, tex['file'])", "elif node.type == 'BSDF_GLOSSY': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0])", "# Tangent Normal elif node.type == 'OBJECT_INFO': return 'wposition' elif", "'(vec3({0}, {1}, {2}) * {3})'.format(\\ vector_curve(name + '0', vec +", "1.0-{4}))'.format(col, hue, sat, val, fac) elif node.type == 'INVERT': fac", "_tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only): global parsed # Compute", "else: # Write bytes if size is different or file", "0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0,", "col2, fac_var) # Revert to mix # out_col = '({0}", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "elif node.type == 'VALTORGB': # ColorRamp fac = parse_value_input(node.inputs[0]) interp", "None: return None curshader.write('float {0} = {1};'.format(res_var, res)) # Normal", "!= None: curshader.write_textures += 1 res = '{0}.a'.format(texture_store(node, tex, tex_name,", "{1};'.format(index_var, index)) # Linear # Write Xs array facs_var =", "None # Reference image name texpath = arm.utils.asset_path(filepath) texfile =", "= parse_vector_input(node.inputs[0]) scale = node.inputs['Scale'].default_value rotation = node.inputs['Rotation'].default_value location =", "elif op == 'TANGENT': out_val = 'tan({0})'.format(val1) elif op ==", "Write index index_var = node_name(node.name) + '_i' curshader.write('int {0} =", "= parse_shader_input(node.inputs[2]) if parse_surface: out_basecol = '({0} * {3} +", "arm.utils.convert_image(image, filepath, \"JPEG\") else: arm.log.warn(matname + '/' + image.name +", "{1}, {2})'.format(out_col, bright, contr) elif node.type == 'GAMMA': out_col =", "= 3 elif(t >= 1449.0): i = 2 elif(t >=", "is_ascii(texfile): # Extract packed data / copy non-ascii texture unpack_path", "if do_convert: if not os.path.isfile(unpack_filepath): fmt = 'PNG' if new_ext", "if is_parsed(tex_store): return tex_store parsed[tex_store] = True mat_bind_texture(tex) con.add_elem('tex', 'short2norm')", "eye', link='_cameraPosition') return 'distance(eye, wposition)' elif node.type == 'FRESNEL': curshader.add_function(c_functions.str_fresnel)", "out_specular, out_opacity, out_emission def parse_shader(node, socket): global emission_found out_basecol =", "blend == 'VALUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)", "= False return tex def is_pow(num): return ((num & (num", "tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False): output_node = node_by_type(nodes, 'OUTPUT_MATERIAL')", "l.from_socket) # Unparsed node if not is_parsed(res_var): parsed[res_var] = True", "{1}) / 2.0)'.format(vec1, vec2) 
elif op == 'DOT_PRODUCT': return 'vec3(dot({0},", "'BSDF_TRANSPARENT': if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type", "node.name if node.arm_material_param else None if tex != None: curshader.write_textures", "'vVecCam' elif node.type == 'NEW_GEOMETRY': if socket == node.outputs[0]: #", "elif op == 'ARCTANGENT': out_val = 'atan({0})'.format(val1) elif op ==", "facs_var = name + '_xs' curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO:", "# Shaders default to quadratic for now return '1.0' elif", "Fall back to noise curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "val2) elif op == 'ROUND': # out_val = 'round({0})'.format(val1) out_val", "tex['file'] = arm.utils.safestr(texfile) s = tex['file'].rsplit('.', 1) if len(s) ==", "Ys array ys_var = name + '_ys' curshader.write('float {0}[{1}];'.format(ys_var, len(points)))", "'EASING': f = '0.0' elif grad == 'DIAGONAL': f =", "= parse_vector_input(node.inputs[0]) bright = parse_value_input(node.inputs[1]) contr = parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast) return", "from the Texture node instead # if node.use_min: # out", "* 0.59 + {0}.b * 0.11) / 3.0) * 2.5)'.format(col)", "# distortion = parse_value_input(node.inputs[3]) # Slow.. res = 'vec3(tex_noise({0} *", "are reserved s = s.replace('_', '_x') return s ## def", "met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[2]) if parse_surface: out_basecol", "* (vec3(1.0) - {0}))'.format(col1, col2, fac_var) elif blend == 'DIVIDE':", "= parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type == 'AMBIENT_OCCLUSION': if", "and # limitations under the License. 
# import math import", "write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic = '1.0'", "{0}_1 = {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post,", "== 'png' else 'JPEG' arm.utils.convert_image(image, converted_path, file_format=fmt) arm.assets.add(converted_path) else: #", "'vec3(0.0)' elif socket == node.outputs[5]: # Velocity particle_info['velocity'] = True", "curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac) elif", "if (ext in ('tga', 'dds')) else 'jpg' tex['file'] = tex['file'].rsplit('.',", "occ2) out_specular = '({0} * 0.5 + {1} * 0.5)'.format(spec1,", "{2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name)) sample_bump = False if to_linear:", "[ [-7.50343014e+02, 3.15679613e-04, 4.73464526e-01], [-1.00402363e+03, 1.29189794e-04, 9.08181524e-01], [-1.22075471e+03, 2.56245413e-05, 1.20753416e+00],", "'(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res) return res elif", "TBN * normalize(texn);') else: frag.write('vec3 n = ({0}) * 2.0", "bytes, write converted .jpg to /unpacked filepath += '.raw' elif", "* {2} + ({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) #", "val2 = parse_value_input(node.inputs[1]) op = node.operation if op == 'ADD':", "'vec3({0})'.format(res_var) else: if inp.type == 'VALUE': # Unlinked reroute return", "'On' else 'vec3(0.0)' elif socket == node.outputs[5]: # Velocity particle_info['velocity']", "'(' if ',' in ar[1]: ar2 = ar[1].split(',', 1) co", "'({0} + {2} * (2.0 * ({1} - vec3(0.5))))'.format(col1, col2,", "# ZYX rotation, Z axis for now.. 
a = rotation[2]", "midlevel = parse_value_input(node.inputs[1]) scale = parse_value_input(node.inputs[2]) nor = parse_vector_input(node.inputs[3]) return", "ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0,", "# Second uvmap referenced if len(lays) > 1 and node.uv_map", "# 'vposition' elif socket == node.outputs[5]: # Window return 'vec3(0.0)'", "'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) unpack_filepath = os.path.join(unpack_path, tex['file']) if", "image.filepath if filepath == '': if image.packed_file is not None:", "for i in range(0, len(elems)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position))", "'n' return res elif node.type == 'MAPPING': out = parse_vector_input(node.inputs[0])", "texfile = arm.utils.extract_filename(filepath) tex['file'] = arm.utils.safestr(texfile) s = tex['file'].rsplit('.', 1)", "Depth return '0.0' elif socket == node.outputs[10]: # Transmission Depth", "write_bump(node, res) return res elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked:", "'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1]) return out elif node.type", "socket == node.outputs[1]: return parse_value_input(node.inputs[7]) else: return None else: return", "objectInfoRandom', link='_objectInfoRandom') return 'objectInfoRandom' elif node.type == 'PARTICLE_INFO': if socket", "Is Reflection Ray return '0.0' elif socket == node.outputs[6]: #", "return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)' return 'vec3(texCoord.x, 1.0 -", "',' in ar[1]: ar2 = ar[1].split(',', 1) co = ar2[0]", "node.type == 'BSDF_GLASS': if parse_surface: write_normal(node.inputs[3]) out_roughness = parse_value_input(node.inputs[1]) if", "else: return out_val elif node.type == 'RGBTOBW': col = parse_vector_input(node.inputs[0])", "return out_group def parse_group_input(node, socket): index = socket_index(node, socket) parent", "if node.inputs[0].is_linked 
else 'const ' fac = parse_value_input(node.inputs[0]) fac_var =", "parse_value_input(node.inputs[3]) res = 'tex_noise({0} * {1})'.format(co, scale) if sample_bump: write_bump(node,", "'0' for i in range(1, len(elems)): index += ' +", "socket == node.outputs[0]: return '{0}.x'.format(vec) elif socket == node.outputs[1]: return", "+ node_name(node.name) curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name)) return nn else: return to_vec1(node.outputs[0].default_value)", "co = 'bposition' scale = parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY':", "# Angular Velocity particle_info['angular_velocity'] = True return 'vec3(0.0)' elif node.type", "socket): # Entering group index = socket_index(node, socket) output_node =", "elif socket == node.outputs[3]: # Is Glossy Ray return '1.0'", "index_var = name + '_i' curshader.write('int {0} = {1};'.format(index_var, index))", "!= '1.0': frag.write('n.xy *= {0};'.format(strength)) frag.write('n = normalize(TBN * n);')", "in range(0, len(elems)): curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0],", "# if node.use_max: # out = 'min({0}, vec3({1}, {2}, {3}))'.format(out,", "res_var = res_var_name(l.from_node, l.from_socket) # Unparsed node if not is_parsed(res_var):", "out_roughness = parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type == 'AMBIENT_OCCLUSION':", "get_arm_export_tangents(): return bpy.data.worlds['Arm'].arm_export_tangents def safesrc(name): return arm.utils.safesrc(name) def get_sdk_path(): return", "os.path.join(unpack_path, tex['file']) # TODO: delete cache when file changes if", "Location particle_info['location'] = True return 'p_location' if arm.utils.get_rp().arm_particles == 'On'", "parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.r'.format(col) elif socket ==", "# Already fetched if is_parsed(store_var_name(node)): return '{0}.a'.format(store_var_name(node)) tex_name = 
safesrc(node.name)", "({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[1] !=", "= _frag geom = _geom tesc = _tesc tese =", "len(elems) == 1: return to_vec3(elems[0].color) # Write cols array cols_var", "_tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only): global parsed # Compute nodes", "= False particle_info['lifetime'] = False particle_info['location'] = False particle_info['size'] =", "sample_bump global sample_bump_res global parsed tex_store = store_var_name(node) if is_parsed(tex_store):", "= vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) return '{0}.a'.format(tex_store) elif node.type ==", "inp.type == 'VALUE': return parse_value_input(inp) def parse_shader_input(inp): if inp.is_linked: l", "elif op == 'SUBTRACT': out_val = '({0} - {1})'.format(val1, val2)", "elif blend == 'SOFT_LIGHT': out_col = '((1.0 - {2}) *", "This module builds upon Cycles nodes work licensed as #", "if node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name))", "or os.path.getsize(unpack_filepath) != image.packed_file.size: with open(unpack_filepath, 'wb') as f: f.write(image.packed_file.data)", "return None return res_var def glsl_type(t): if t == 'RGB'", "elif node.image == None: # Empty texture tex = {}", "= _tese parse_surface = _parse_surface parse_opacity = _parse_opacity basecol_only =", "0.0, 0.0]) else: if mat_batch() and inp.is_uniform: return to_uniform(inp) else:", "return 'distance(eye, wposition)' elif node.type == 'FRESNEL': curshader.add_function(c_functions.str_fresnel) ior =", "by applicable law or agreed to in writing, software #", "or parse_opacity: parsed = {} parents = [] normal_parsed =", "through return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_GRADIENT': if", "= ar2[0] post = ',' + ar2[1] else: co =", "parse_value_input(node.inputs[0]) # Roughly map to cycles - 450 to 600", "'mposition' elif node.type == 'HAIR_INFO': return 
'vec3(0.0)' # Tangent Normal", "{1})'.format(blend, dotnv) elif socket == node.outputs[1]: # Facing return '(1.0", "1.48001316e-01], [3.78765709e+03, 9.36026367e-06, 3.98995841e-01] ] blackbody_table_g = [ [-7.50343014e+02, 3.15679613e-04,", "do_convert = ext not in ('jpg', 'png', 'hdr', 'mp4') #", "* {0} + {2} * ((vec3(1.0) - {0}) * {1}", "== 'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co", "RGB if node.type == 'GROUP': return parse_group(node, socket) elif node.type", "Write index index_var = name + '_i' curshader.write('int {0} =", "0.5 + {1} * 0.5)'.format(opac1, opac2) elif node.type == 'BSDF_PRINCIPLED':", "'objectInfoRandom' elif node.type == 'PARTICLE_INFO': if socket == node.outputs[0]: #", "= s[1].lower() do_convert = ext not in ('jpg', 'png', 'hdr',", "{0}_fh2));'.format(sample_bump_res)) res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res)", "len(points))) # TODO: Make const for i in range(0, len(points)):", "+ '(' if ',' in ar[1]: ar2 = ar[1].split(',', 1)", "= parse_vector_input(node.inputs[0]) return '((({0}.r * 0.3 + {0}.g * 0.59", "Second uvmap referenced if len(lays) > 1 and node.uv_map ==", "== 'HUE_SAT': curshader.add_function(c_functions.str_hue_sat) hue = parse_value_input(node.inputs[0]) sat = parse_value_input(node.inputs[1]) val", "applicable law or agreed to in writing, software # distributed", "if st == 'RGB' or st == 'RGBA' or st", "in range(0, len(elems)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position)) # Mix", "+ \".jpg\") arm.utils.convert_image(image, filepath, \"JPEG\") else: arm.log.warn(matname + '/' +", "return if inp.is_linked == False: return if normal_parsed: return normal_parsed", "'ROUND': # out_val = 'round({0})'.format(val1) out_val = 'floor({0} + 0.5)'.format(val1)", "scale = parse_value_input(node.inputs[3]) res = 'tex_checker({0}, {1}, {2}, 
{3})'.format(co, col1,", "else defaults to linear if image_node.extension != 'REPEAT': # Extend", "Single channel out_occlusion = parse_vector_input(node.inputs[0]) + '.r' elif node.type ==", "import arm.assets import arm.utils import arm.make_state import arm.log import arm.material.mat_state", "= '({0} / {1})'.format(val1, val2) elif op == 'POWER': out_val", "if sample_bump: sample_bump_res = tex_store curshader.write('float {0}_1 = textureOffset({1}, {2}.xy,", "'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac) elif node.type == 'MIX_RGB':", "'RGB' or t == 'RGBA' or t == 'VECTOR': return", "# Is Diffuse Ray return '1.0' elif socket == node.outputs[3]:", "curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res,", "{0}_2 = {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post,", "make_texture(node, tex_name) tex_link = node.name if node.arm_material_param else None if", "fac_var = name + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac))", "Do not use Normal Map node with Armory PBR, connect", "else: return 'float' def to_uniform(inp): uname = safesrc(inp.node.name) + safesrc(inp.name)", "location[2]) # use Extension parameter from the Texture node instead", "== 'VECTOR': return 'vec3' else: return 'float' def to_uniform(inp): uname", "not use Normal Map node with Armory PBR, connect Image", "{4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl)) sample_bump = False def", "elif node.type == 'BUMP': # Interpolation strength strength = parse_value_input(node.inputs[0])", "import shutil emission_found = False particle_info = None # Particle", "particle_info['angular_velocity'] = False sample_bump = False sample_bump_res = '' wrd", "'wnormal' elif socket == node.outputs[2]: # Tangent return 'wtangent' elif", "clip tex['u_addressing'] = 'clamp' tex['v_addressing'] = 'clamp' if image.source ==", "parse_value path preferred? 
nor = parse_vector_input(node.inputs[0]) return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor)", "-{4}){3};'.format(sample_bump_res, pre, co, post, scl)) sample_bump = False def to_vec1(v):", "'.z', curves[3].points)) elif node.type == 'COMBHSV': curshader.add_function(c_functions.str_hue_sat) h = parse_value_input(node.inputs[0])", "return parse_group_input(node, socket) elif node.type == 'ATTRIBUTE': # Pass time", "strength = parse_value_input(strength_input) if strength != '1.0': frag.write('n.xy *= {0};'.format(strength))", "sample_bump = False sample_bump_res = '' wrd = bpy.data.worlds['Arm'] #", "= parse_value_input(node.inputs[2]) nor = parse_vector_input(node.inputs[3]) return '(vec3({0}) * {1})'.format(height, scale)", "elems = node.color_ramp.elements if len(elems) == 1: return to_vec3(elems[0].color) #", "parsed # Compute nodes only once global parents global normal_parsed", "global tesc global tese global parse_surface global parse_opacity global basecol_only", "{} tex['name'] = tex_name tex['file'] = '' return '{0}.a'.format(texture_store(node, tex,", "Raw bytes, write converted .jpg to /unpacked filepath += '.raw'", "(num - 1)) == 0) and num != 0 def", "particle_info['size'] = False particle_info['velocity'] = False particle_info['angular_velocity'] = False sample_bump", "== 'BSDF_HAIR': pass elif node.type == 'HOLDOUT': if parse_surface: #", "arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket == node.outputs[4]: #", "\"License\"); # you may not use this file except in", "== 'RGBA' or st == 'VECTOR': return '{0}.x'.format(res_var) else: #", "res = 'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2, scale) if", "# True Normal return 'n' if curshader.shader_type == 'frag' else", "fac_var) # Revert to mix elif blend == 'SATURATION': out_col", "tex['file']) if do_convert: if not os.path.isfile(unpack_filepath): fmt = 'PNG' if", "= store_var_name(node) # Pink color for missing texture curshader.write('vec4 {0}", 
"elif(t >= 3315.0): i = 4 elif(t >= 1902.0): i", "'vec3(dot({0}, {1}))'.format(vec1, vec2) elif op == 'CROSS_PRODUCT': return 'cross({0}, {1})'.format(vec1,", "image_node.extension != 'REPEAT': # Extend or clip tex['u_addressing'] = 'clamp'", "0.59 + {0}.b * 0.11) / 3.0) * 2.5)'.format(col) elif", "mat_get_material() mat_users = mat_get_material_users() if mat_users != None and mat", "parse_value_input(node.inputs[7]) else: return None else: return parse_group(node, socket) elif node.type", "# Roughly map to cycles - 450 to 600 nanometers", "curve return '(vec3({0}, {1}, {2}) * {3})'.format(\\ vector_curve(name + '0',", "'normalize({0})'.format(vec1) elif node.type == 'DISPLACEMENT': height = parse_value_input(node.inputs[0]) midlevel =", "return '0.0' elif node.type == 'HAIR_INFO': # Is Strand #", "'.raw' elif image.source == \"GENERATED\": unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets',", "(1.0 - {0}), {1})'.format(blend, dotnv) elif socket == node.outputs[1]: #", "elif node.type == 'TEX_POINTDENSITY': return '0.0' elif node.type == 'TEX_VORONOI':", "post, scl)) curshader.write('float {0}_2 = {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res,", "'0.0' elif node.type == 'HAIR_INFO': # Is Strand # Intercept", "return 'vec3({0}, {1}, {2})'.format(r, g, b) elif node.type == 'WAVELENGTH':", "* {1}).r'.format(co, scale) if sample_bump: write_bump(node, res) return res elif", "name = node_name(node.name) # mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0}, {1}, {2}) *", "= True elif interpolation == 'Closest': tex['min_filter'] = 'point' tex['mag_filter']", "co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'vec3(tex_wave_f({0} *", "parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3]) # Slow.. 
res = 'vec3(tex_noise({0}", "Ray return '0.0' elif socket == node.outputs[7]: # Ray Length", "node.outputs[1]: # Is Shadow Ray return '0.0' elif socket ==", "strength_input != None: strength = parse_value_input(strength_input) if strength != '1.0':", "vector_curve(name + '3b', vec + '.y', curves[3].points), vector_curve(name + '3c',", "s: # Consecutive _ are reserved s = s.replace('_', '_x')", "return 'p_location' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket", "now return 'vcolor' else: # Vector con.add_elem('tex', 'short2norm') # UVMaps", "mat_bind_texture(tex): mat_state.bind_textures.append(tex) def mat_texture_grad(): return mat_state.texture_grad def mat_get_material(): return mat_state.material", "mix elif blend == 'VALUE': out_col = 'mix({0}, {1}, {2})'.format(col1,", "elif node.type == 'EMISSION': if parse_surface: # Multiply basecol out_basecol", "os import arm.assets import arm.utils import arm.make_state import arm.log import", "1: return to_vec3(elems[0].color) # Write cols array cols_var = node_name(node.name)", "== 'ADD': out_val = '({0} + {1})'.format(val1, val2) elif op", "[] normal_parsed = False curshader = frag out_basecol, out_roughness, out_metallic,", "if sample_bump_res != '': if node.invert: ext = ['1', '2',", "{3} + {1} * {2})'.format(met1, met2, fac_var, fac_inv_var) out_occlusion =", "mat_state.material.name if image is None: return None # Get filepath", "op == 'SUBTRACT': return '({0} - {1})'.format(vec1, vec2) elif op", "[0.0, 0.0, 0.0] if scale[0] != 1.0 or scale[1] !=", "return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_displacement_input(inp):", "== 'CURVE_VEC': # Vector Curves fac = parse_value_input(node.inputs[0]) vec =", "permissions and # limitations under the License. 
# import math", "'1.0' out_emission = '0.0' if node.type == 'GROUP': if node.node_tree.name.startswith('Armory", "tex_store = store_var_name(node) if is_parsed(tex_store): return tex_store parsed[tex_store] = True", "sample_bump = False def to_vec1(v): return str(v) def to_vec3(v): return", "node.type == 'ATTRIBUTE': # Pass time till drivers are implemented", "out_val = '({0} / {1})'.format(val1, val2) elif op == 'POWER':", "import math import bpy import os import arm.assets import arm.utils", "elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0])", "to 600 nanometers return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl) #", "con global vert global frag global geom global tesc global", "op == 'MODULO': # out_val = 'float({0} % {1})'.format(val1, val2)", "out_specular = '0.0' elif node.type == 'BSDF_GLOSSY': if parse_surface: write_normal(node.inputs[2])", "'hdr', 'mp4') # Convert image if do_convert: new_ext = 'png'", "None: matname = mat_state.material.name if image is None: return None", "'GROUP_OUTPUT') if output_node == None: return inp = output_node.inputs[index] parents.append(node)", "transmission = parse_vector_input(node.inputs[15]) # transmission_roughness = parse_vector_input(node.inputs[16]) if node.inputs[17].is_linked or", "res elif node.type == 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if node.inputs[0].is_linked: co =", "/ (1.0 - {1})))'.format(dotnv, blend) elif node.type == 'LIGHT_PATH': if", "res elif node.type == 'TEX_MUSGRAVE': # Fall back to noise", "True emission_strength = parse_value_input(node.inputs[1]) out_basecol = '({0} * {1})'.format(out_basecol, emission_strength)", "# Pass constant return to_vec3([rgb[0], rgb[1], rgb[2]]) elif node.type ==", "math.cos(a), math.sin(a)) # if node.rotation[1] != 0.0: # a =", "'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend, dotnv) elif socket ==", "return 
parse_vector_input(inp) elif inp.type == 'VECTOR': return parse_vector_input(inp) elif inp.type", "= parse_value_input(node.inputs[4]) res = 'tex_brick_f({0} * {1})'.format(co, scale) if sample_bump:", "= False particle_info['location'] = False particle_info['size'] = False particle_info['velocity'] =", "- invalid file path') return None # Reference image name", "Length return '0.0' elif socket == node.outputs[8]: # Ray Depth", "{0}[{1}];'.format(ys_var, len(points))) # TODO: Make const for i in range(0,", "> 0: s += '_texread' s = safesrc(s) if '__'", "cos(theta) out = 'vec3({0}.x * {1} - ({0}.y) * {2},", "vec2) elif op == 'AVERAGE': return '(({0} + {1}) /", "'dds')) else 'jpg' tex['file'] = tex['file'].rsplit('.', 1)[0] + '.' +", "if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_CHECKER':", "+ uname) return uname def store_var_name(node): return node_name(node.name) + '_store'", "ntype: return n def socket_index(node, socket): for i in range(0,", "elems[i].color[2])) # Get index fac_var = node_name(node.name) + '_fac' curshader.write('float", "'short2norm') # UVMaps only for now mat = mat_get_material() mat_users", "arm.log.warn('Material ' + matname + '/' + image.name + '", "node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name)) return", "== 'VECTOR': return '{0}.x'.format(res_var) else: # VALUE return res_var else:", "out_metallic = '({0} * {3} + {1} * {2})'.format(met1, met2,", "!= 0.0: # ZYX rotation, Z axis for now.. 
a", "disp = {0};'.format(out_disp)) def parse_group(node, socket): # Entering group index", "if inp.type == 'SHADER': return parse_shader_input(inp) elif inp.type == 'RGB':", "elif node.type == 'BSDF_TOON': # write_normal(node.inputs[3]) pass elif node.type ==", "parse_surface, parse_opacity, parse_displacement, basecol_only) def parse_output(node, _con, _vert, _frag, _geom,", "normal_parsed global curshader # Active shader - frag for surface", "p in parents: s = p.name + '_' + s", "val1 = parse_value_input(node.inputs[0]) val2 = parse_value_input(node.inputs[1]) op = node.operation if", "res elif node.image == None: # Empty texture tex =", "True return 'vec3(0.0)' elif node.type == 'TANGENT': return 'wtangent' elif", "Unlinked reroute return to_vec3([0.0, 0.0, 0.0]) else: if mat_batch() and", "Make const for i in range(0, len(elems)): curshader.write('{0}[{1}] = {2};'.format(facs_var,", "return 'vec3(0.0)' # 'vposition' elif socket == node.outputs[5]: # Window", "= parse_vector_input(node.inputs[14]) # transmission = parse_vector_input(node.inputs[15]) # transmission_roughness = parse_vector_input(node.inputs[16])", "'RGBA' or t == 'VECTOR': return 'vec3' else: return 'float'", "fac_inv_var = node_name(node.name) + '_fac_inv' curshader.write('{0}float {1} = {2};'.format(prefix, fac_var,", "_frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only): global parsed", "# Revert to mix elif blend == 'VALUE': out_col =", "= {0};'.format(out_occlusion)) frag.write('specular = {0};'.format(out_specular)) if '_Emission' in wrd.world_defs: frag.write('emission", "write_normal(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type", "new_ext = 'png' if (ext in ('tga', 'dds')) else 'jpg'", "Object Index curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex') return 'objectInfoIndex' elif socket ==", "'QUADRATIC_SPHERE': f = '0.0' elif grad == 'SPHERICAL': f 
=", "normal_parsed = False rpdat = arm.utils.get_rp() if rpdat.arm_rp_displacement == 'Tessellation'", "or node.inputs[17].default_value[0] != 0.0: out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found = True", "'wb') as f: f.write(image.packed_file.data) # Copy non-ascii texture else: if", "+ {1} * 0.5)'.format(opac1, opac2) elif node.type == 'BSDF_PRINCIPLED': if", "== 'TEX_COORD': #obj = node.object #instance = node.from_instance if socket", "scale) if sample_bump: write_bump(node, res) return res elif node.type ==", "elif node.type == 'MIX_SHADER': prefix = '' if node.inputs[0].is_linked else", "- {2}) * {0} + {2} * ((vec3(1.0) - {0})", "- {0}_{2}; float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1],", "'mp4') # Convert image if do_convert: new_ext = 'png' if", "'1.0' elif socket == node.outputs[1]: # Is Shadow Ray return", "if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_NOISE':", "you may not use this file except in compliance with", "== 'VALUE': # Unlinked reroute return to_vec3([0.0, 0.0, 0.0]) else:", "out_col = parse_vector_input(node.inputs[1]) return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac)", "{0};'.format(out_occlusion)) frag.write('specular = {0};'.format(out_specular)) if '_Emission' in wrd.world_defs: frag.write('emission =", "== 'LAYER_WEIGHT': blend = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0},", "global emission_found global particle_info global sample_bump global sample_bump_res con =", "l.from_socket) else: out_basecol = 'vec3(0.8)' out_roughness = '0.0' out_metallic =", "image.packed_file is not None or not is_ascii(texfile): # Extract packed", "once global parents global normal_parsed global curshader # Active shader", "'__' in s: # Consecutive _ are reserved s =", "out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_specular = '0.0' elif", "{2})'.format(out_col, 
bright, contr) elif node.type == 'GAMMA': out_col = parse_vector_input(node.inputs[0])", "'mix({0}, {0} - {1}, {2})'.format(col1, col2, fac_var) elif blend ==", "False particle_info['age'] = False particle_info['lifetime'] = False particle_info['location'] = False", "if op == 'DOT_PRODUCT': return 'dot({0}, {1})'.format(vec1, vec2) else: return", "0.0]) else: if mat_batch() and inp.is_uniform: return to_uniform(inp) else: return", "== node.outputs[0]: # Index particle_info['index'] = True return 'p_index' if", "#space = node.space #map = node.uv_map # Color parse_normal_map_color_input(node.inputs[1], node.inputs[0])", "== 'VERTEX_COLOR': con.add_elem('col', 'short4norm') # Vcols only for now return", "'.y', curves[3].points), vector_curve(name + '3c', vec + '.z', curves[3].points)) elif", "os.path.exists(unpack_path): os.makedirs(unpack_path) filepath = os.path.join(unpack_path, image.name + \".jpg\") arm.utils.convert_image(image, filepath,", "= parse_value_input(node.inputs[1]) # detail = parse_value_input(node.inputs[2]) # distortion = parse_value_input(node.inputs[3])", "* (vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac_var)", "+ '_res' def write_result(l): global parsed res_var = res_var_name(l.from_node, l.from_socket)", "+= '.raw' elif image.source == \"GENERATED\": unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled',", "link='$noise256.png') if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition'", "{0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co)", "frag.write('basecol = {0};'.format(out_basecol)) frag.write('roughness = {0};'.format(out_roughness)) frag.write('metallic = {0};'.format(out_metallic)) frag.write('occlusion", "* {2})'.format(opac1, opac2, fac_var, fac_inv_var) elif node.type == 'ADD_SHADER': bc1,", "== 'SUBTRACT': return '({0} - {1})'.format(vec1, vec2) elif op ==", "upon Cycles nodes work licensed as # Copyright 2011-2013 Blender", "node.type == 'BSDF_TOON': # 
write_normal(node.inputs[3]) pass elif node.type == 'BSDF_TRANSLUCENT':", "'GROUP': if node.node_tree.name.startswith('Armory PBR'): # Displacement if socket == node.outputs[1]:", "parse_surface: # Multiply basecol out_basecol = parse_vector_input(node.inputs[0]) out_emission = '1.0'", "{1}).rgb'.format(co, scale) if sample_bump: write_bump(node, res) return res elif node.type", "res = parse_input(inp) parents.append(parent) # Return to group return res", "= parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) res", "= {} tex['name'] = tex_name tex['file'] = '' return '{0}.rgb'.format(texture_store(node,", "* {1})'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res", "node.type == 'RGB': if node.arm_material_param: nn = 'param_' + node_name(node.name)", "= vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2])) # Get", "curshader.write_textures -= 1 return '{0}.rgb'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic)", "res_var_name(l.from_node, l.from_socket) # Unparsed node if not is_parsed(res_var): parsed[res_var] =", "linear if image_node.extension != 'REPEAT': # Extend or clip tex['u_addressing']", "= 'movie' tex['min_filter'] = 'linear' tex['mag_filter'] = 'linear' tex['mipmap_filter'] =", "arm.assets.add(arm.utils.asset_path(filepath)) # if image_format != 'RGBA32': # tex['format'] = image_format", "# out = 'vec3({0}.x * {1} - {0}.z * {2},", "out_specular = '1.0' out_opacity = '1.0' out_emission = '0.0' return", "= parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' fac_inv_var = node_name(node.name)", "res elif node.type == 'BRIGHTCONTRAST': out_col = parse_vector_input(node.inputs[0]) bright =", "== 'REROUTE': return parse_shader_input(l.from_node.inputs[0]) return parse_shader(l.from_node, l.from_socket) else: out_basecol =", "if filepath == '': if image.packed_file is not 
None: filepath", "Is Strand # Intercept # Thickness return '0.5' elif node.type", "out_col elif node.type == 'BLACKBODY': t = float(parse_value_input(node.inputs[0])) rgb =", "out_metallic = parse_value_input(node.inputs[4]) out_specular = parse_value_input(node.inputs[5]) # specular_tint = parse_vector_input(node.inputs[6])", "= 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name) else: uv_name = 'texCoord' triplanar", "r[2] rgb[1] = g[0] * t_inv + g[1] * t", "arm.assets.add(path) def assets_add_embedded_data(path): arm.assets.add_embedded_data(path) def mat_name(): return mat_state.material.name def mat_batch():", "0)'.format(fac_var, elems[i].position) # Write index index_var = node_name(node.name) + '_i'", "= inp.links[0] if l.from_node.type == 'REROUTE': return parse_shader_input(l.from_node.inputs[0]) return parse_shader(l.from_node,", "1.30656109e+00], [-5.00279505e+02, -4.59745390e-06, 1.09090465e+00] ] blackbody_table_b = [ [0.0, 0.0,", "0.0002;'.format(out_opacity)) # Volume # parse_volume_input(node.inputs[1]) # Displacement if _parse_displacement and", "None: curshader = tese else: curshader = vert out_disp =", "'TEX_GRADIENT': if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co = 'bposition'", "== 'TEX_CHECKER': curshader.add_function(c_functions.str_tex_checker) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co", "if parse_surface: write_normal(node.inputs[4]) # Revert to glossy out_basecol = parse_vector_input(node.inputs[0])", "= 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2, fac_var) elif blend", "= '({0} * {3} + {1} * {2})'.format(occ1, occ2, fac_var,", "return inp = output_node.inputs[index] parents.append(node) out_group = parse_input(inp) parents.pop() return", "'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale) if sample_bump: write_bump(node, res)", "os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) 
converted_path =", "else: arm.assets.add(arm.utils.asset_path(filepath)) # if image_format != 'RGBA32': # tex['format'] =", "'TANGENT': return 'wtangent' elif node.type == 'TEX_COORD': #obj = node.object", "vec = parse_vector_input(node.inputs[1]) curves = node.mapping.curves name = node_name(node.name) #", "= '({0} + {1})'.format(val1, val2) elif op == 'SUBTRACT': out_val", "location[1], location[2]) # use Extension parameter from the Texture node", "'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix elif", "if to_linear: curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store)) return tex_store def write_bump(node,", "facs array facs_var = node_name(node.name) + '_facs' curshader.write('float {0}[{1}];'.format(facs_var, len(elems)))", "'FRESNEL': curshader.add_function(c_functions.str_fresnel) ior = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv = 'dot({0},", "elif op == 'MODULO': # out_val = 'float({0} % {1})'.format(val1,", "= [0,0,0] blackbody_table_r = [ [2.52432244e+03, -1.06185848e-03, 3.11067539e+00], [3.37763626e+03, -4.34581697e-04,", "file except in compliance with the License. 
# You may", "arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket == node.outputs[1]: #", "and inp.links[0].from_node.type != 'GROUP_INPUT': normal_res = parse_vector_input(inp) if normal_res !=", "= 'clamp' tex['v_addressing'] = 'clamp' if image.source == 'MOVIE': tex['source']", "if not is_parsed(res_var): parsed[res_var] = True st = l.from_socket.type if", "return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2]) def node_by_type(nodes, ntype): for", "parse_surface global parse_opacity global basecol_only global emission_found global particle_info global", "grad == 'QUADRATIC': f = '0.0' elif grad == 'EASING':", "False particle_info = {} particle_info['index'] = False particle_info['age'] = False", "inp.type == 'SHADER': return parse_shader_input(inp) elif inp.type == 'RGB': return", "out_roughness = parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type == 'EMISSION':", "if parse_surface: write_normal(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))", "== 'VALUE': if node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('float", "or node.inputs[6].default_value != 0.0: out_emission = parse_value_input(node.inputs[6]) emission_found = True", "== 'WAVELENGTH': curshader.add_function(c_functions.str_wavelength_to_rgb) wl = parse_value_input(node.inputs[0]) # Roughly map to", "basecol_only global emission_found global particle_info global sample_bump global sample_bump_res con", "'0.0' elif socket == node.outputs[5]: # Is Reflection Ray return", "# out = 'vec3({0}.y * {1} - {0}.z * {2},", "op == 'SINE': out_val = 'sin({0})'.format(val1) elif op == 'COSINE':", "to mix elif blend == 'SATURATION': out_col = 'mix({0}, {1},", "os.path.getsize(unpack_filepath) != image.packed_file.size: with open(unpack_filepath, 'wb') as f: f.write(image.packed_file.data) #", "= False particle_info['age'] = False particle_info['lifetime'] = False particle_info['location'] =", 
"= node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0, 0.0, 0.0] if scale[0]", "{1}))'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type == 'NORMAL_MAP': if curshader == tese:", "socket == node.outputs[2]: # Tangent return 'wtangent' elif socket ==", "= parse_value_input(node.inputs[2]) sample_bump = False nor = parse_vector_input(node.inputs[3]) if sample_bump_res", "write_bump(node, res, 0.1) return res elif node.type == 'TEX_MUSGRAVE': #", "+ filepath + ')') return None if do_convert: unpack_path =", "fac_var) elif blend == 'DARKEN': out_col = 'min({0}, {1} *", "return node_name(node.name) + '_store' def texture_store(node, tex, tex_name, to_linear=False, tex_link=None):", "= parse_value_input(node.inputs[0]) out_col = parse_vector_input(node.inputs[1]) return 'mix({0}, vec3(1.0) - ({0}),", "return out_col elif node.type == 'BLACKBODY': t = float(parse_value_input(node.inputs[0])) rgb", "parse_value_input(node.inputs[1]) res = 'tex_wave_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node,", "g2.zw);'.format(tex_store, tex_name, uv_name)) else: curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name,", "= False rpdat = arm.utils.get_rp() if rpdat.arm_rp_displacement == 'Tessellation' and", "return '(1.0 - pow({0}, ({1} < 0.5) ? 
2.0 *", "if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_ENVIRONMENT':", "= {} particle_info['index'] = False particle_info['age'] = False particle_info['lifetime'] =", "1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var) def write_normal(inp): if", "Is Transmission Ray return '0.0' elif socket == node.outputs[7]: #", "fac_var = node_name(node.name) + '_fac' fac_inv_var = node_name(node.name) + '_fac_inv'", "tex = {} tex['name'] = tex_name image = image_node.image if", "parse_surface = _parse_surface parse_opacity = _parse_opacity basecol_only = _basecol_only emission_found", "0.66))'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return res elif", "* (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var, index_var,", "- bounds return 'bposition' elif socket == node.outputs[1]: # Normal", "Glossy Ray return '1.0' elif socket == node.outputs[4]: # Is", "for missing texture parsed[tex_store] = True curshader.write_textures += 1 curshader.write('vec4", "1.0 - texCoord.y, 0.0)' elif node.type == 'RGB': if node.arm_material_param:", "parse_vector_input(node.inputs[0]) bright = parse_value_input(node.inputs[1]) contr = parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0},", "met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0]) bc2, rough2, met2,", "tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only) def parse_output(node, _con, _vert,", "out_occlusion = parse_value_input(node.inputs[2]) # Roughness out_roughness = parse_value_input(node.inputs[3]) # Metallic", "uname def store_var_name(node): return node_name(node.name) + '_store' def texture_store(node, tex,", "parse_group_input(node, socket) elif node.type == 'VERTEX_COLOR': con.add_elem('col', 'short4norm') # Vcols", "# Vector con.add_elem('tex', 'short2norm') # UVMaps only for now mat", "socket) elif node.type == 'ATTRIBUTE': # Pass time till drivers", "res_var else: # 
VALUE return 'vec3({0})'.format(res_var) else: if inp.type ==", "# This module builds upon Cycles nodes work licensed as", "return mat_state.material.name def mat_batch(): return mat_state.batch def mat_bind_texture(tex): mat_state.bind_textures.append(tex) def", "parse_value(node, socket): global particle_info global sample_bump if node.type == 'GROUP':", "== 'ARCTAN2': out_val = 'atan({0}, {1})'.format(val1, val2) if node.use_clamp: return", "write converted .jpg to /unpacked filepath += '.raw' elif image.source", "== node.outputs[3]: # Location particle_info['location'] = True return 'p_location' if", "g[0] * t_inv + g[1] * t + g[2] rgb[2]", "color for missing texture parsed[tex_store] = True curshader.write_textures += 1", "+ ({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[1]", "to_vec3(inp.default_value) def parse_vector(node, socket): global particle_info global sample_bump global sample_bump_res", "'texCoord' triplanar = node.projection == 'BOX' if triplanar: curshader.write(f'vec3 texCoordBlend", "'linearize(gl_FragCoord.z, cameraProj)' # View Distance else: curshader.add_uniform('vec3 eye', link='_cameraPosition') return", "Transparent Depth return '0.0' elif socket == node.outputs[10]: # Transmission", "4.73464526e-01], [-1.00402363e+03, 1.29189794e-04, 9.08181524e-01], [-1.22075471e+03, 2.56245413e-05, 1.20753416e+00], [-1.42546105e+03, -4.01730887e-05, 1.44002695e+00],", "Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_GRADIENT':", "-7.52204323e-01] ] if (t >= 12000): rgb[0] = 0.826270103 rgb[1]", "node.inputs['Rotation'].default_value location = node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0, 0.0, 0.0]", "out_val = 'asin({0})'.format(val1) elif op == 'ARCCOSINE': out_val = 'acos({0})'.format(val1)", "sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z *", "'({0} * 0.5 + {1} * 0.5)'.format(met1, met2) out_occlusion =", "tex, tex_name, True, tex_link=tex_link)) else: tex_store = 
store_var_name(node) # Pink", "parse_value_input(inp) def parse_shader_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type", "return normal_parsed = True frag.write_normal += 1 if not get_arm_export_tangents()", "* n)'.format(sample_bump_res) sample_bump_res = '' else: res = 'n' return", "scale[1], scale[2]) if rotation[2] != 0.0: # ZYX rotation, Z", "1 : 0)'.format(fac_var, points[i].location[0]) # Write index index_var = name", "= node_name(node.name) + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) index", "triplanar: curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2", "parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.x'.format(vec) elif socket ==", "curshader.write_textures += 1 curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))", "return parse_value_input(inp) def parse_shader_input(inp): if inp.is_linked: l = inp.links[0] if", "out_emission def parse_displacement_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type", "True return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif", "{0}.x * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))", "elif op == 'ARCTAN2': out_val = 'atan({0}, {1})'.format(val1, val2) if", "= 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_magic({0} * {1}", "return res elif node.type == 'TEX_ENVIRONMENT': # Pass through return", "'1.0' elif node.type == 'VALUE': if node.arm_material_param: nn = 'param_'", "'({0}.x + {0}.y) * 0.5'.format(co) elif grad == 'RADIAL': f", "{1})'.format(height, scale) def parse_normal_map_color_input(inp, strength_input=None): global normal_parsed global frag if", "in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1])) # Get", "'min({0}, {1} * {2})'.format(col1, col2, fac_var) elif blend == 'LIGHTEN':", "== node.outputs[1]: # Facing return '(1.0 - pow({0}, ({1} <", "' - Do not use 
Normal Map node with Armory", "'0.0' elif grad == 'EASING': f = '0.0' elif grad", "True return 'p_age' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif", "Cycles nodes work licensed as # Copyright 2011-2013 Blender Foundation", "mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): # No uvlayers for Curve lays", "Tangent Normal elif node.type == 'OBJECT_INFO': return 'wposition' elif node.type", "elif node.type == 'HOLDOUT': if parse_surface: # Occlude out_occlusion =", "return out elif node.type == 'NORMAL': if socket == node.outputs[0]:", "elif op == 'DOT_PRODUCT': return 'vec3(dot({0}, {1}))'.format(vec1, vec2) elif op", "== 'MULTIPLY': out_val = '({0} * {1})'.format(val1, val2) elif op", "= '0.0' elif node.type == 'BSDF_REFRACTION': # write_normal(node.inputs[3]) pass elif", "= 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' if socket ==", "tex_store parsed[tex_store] = True mat_bind_texture(tex) con.add_elem('tex', 'short2norm') curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link)", "+ {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if node.rotation[0]", "= texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name)) if sample_bump: sample_bump_res = tex_store", "= node_by_type(nodes, 'OUTPUT_MATERIAL') if output_node != None: parse_output(output_node, con, vert,", "node.outputs[0]: # Index particle_info['index'] = True return 'p_index' if arm.utils.get_rp().arm_particles", "node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' return", "= tex['file'].rsplit('.', 1) if len(s) == 1: arm.log.warn(matname + '/'", "* {0} + {0} * (vec3(1.0) - (vec3(1.0) - {1})", "node with Armory PBR, connect Image Texture directly') parse_normal_map_color_input(node.inputs[5]) #", "{0}_fh1));'.format(sample_bump_res)) curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res)) res = 'normalize(mat3({0}_a,", 
"arm.utils.convert_image(image, converted_path, file_format=fmt) arm.assets.add(converted_path) else: # Link image path to", "# Testing.. get function parts.. ar = res.split('(', 1) pre", "return 'dot({0}, {1})'.format(vec1, vec2) else: return '0.0' ## def vector_curve(name,", "= False nor = parse_vector_input(node.inputs[3]) if sample_bump_res != '': if", "node.type == 'MIX_SHADER': prefix = '' if node.inputs[0].is_linked else 'const", "{2})'.format(col1, col2, fac_var) elif blend == 'OVERLAY': out_col = 'mix({0},", "4.24068546e-04, -7.52204323e-01] ] if (t >= 12000): rgb[0] = 0.826270103", "[-5.00279505e+02, -4.59745390e-06, 1.09090465e+00] ] blackbody_table_b = [ [0.0, 0.0, 0.0,", "= 'png' if (ext in ('tga', 'dds')) else 'jpg' tex['file']", "parse_normal_map_color_input(node.inputs[5]) # Emission if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0: out_emission", "# Consecutive _ are reserved s = s.replace('_', '_x') return", "= True return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'", "facs_var) elif node.type == 'CURVE_VEC': # Vector Curves fac =", "range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0])) # Map vector", "VALUE return 'vec3({0})'.format(res_var) else: if inp.type == 'VALUE': # Unlinked", "out_metallic, out_occlusion, out_specular, out_opacity, out_emission def parse_displacement_input(inp): if inp.is_linked: l", "'MODULO': # out_val = 'float({0} % {1})'.format(val1, val2) out_val =", "= 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix", "= parse_value_input(node.inputs[1]) return 'pow({0}, vec3({1}))'.format(out_col, gamma) elif node.type == 'HUE_SAT':", "Backfacing return '(1.0 - float(gl_FrontFacing))' elif socket == node.outputs[7]: #", "scale = parse_value_input(node.inputs[1]) res = 'tex_magic({0} * {1} * 4.0)'.format(co,", "'tex_voronoi({0} * {1}).a'.format(co, scale) else: # CELLS res = 'tex_voronoi({0}", "filepath = image.filepath if 
filepath == '': if image.packed_file is", "parse_value_input(node.inputs[4]) # Normal if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP': warn(mat_name()", "Ray Length return '0.0' elif socket == node.outputs[8]: # Ray", ">= 1449.0): i = 2 elif(t >= 1167.0): i =", "parsed[tex_store] = True mat_bind_texture(tex) con.add_elem('tex', 'short2norm') curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link) if", "os.makedirs(unpack_path) unpack_filepath = os.path.join(unpack_path, tex['file']) if do_convert: if not os.path.isfile(unpack_filepath):", "Is Glossy Ray return '1.0' elif socket == node.outputs[4]: #", "return to_vec3(socket.default_value) elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if node.inputs[0].is_linked: co", "' ' + uname) return uname def store_var_name(node): return node_name(node.name)", "{0} = {1};'.format(index_var, index)) if interp == 'CONSTANT': return '{0}[{1}]'.format(cols_var,", "parse_group_input(node, socket) elif node.type == 'MIX_SHADER': prefix = '' if", "elif node.type == 'OBJECT_INFO': return 'wposition' elif node.type == 'PARTICLE_INFO':", "t + b[3] # Pass constant return to_vec3([rgb[0], rgb[1], rgb[2]])", "{0} = {1};'.format(fac_var, fac)) index = '0' for i in", "texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;') else: if mat_texture_grad(): curshader.write('vec4 {0} =", "+ '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) col1 = parse_vector_input(node.inputs[1])", "else: return None else: return parse_group(node, socket) elif node.type ==", "node.inputs[0]) return None elif node.type == 'VECT_TRANSFORM': #type = node.vector_type", "## def get_rp_renderer(): return arm.utils.get_rp().rp_renderer def get_arm_export_tangents(): return bpy.data.worlds['Arm'].arm_export_tangents def", "def mat_texture_grad(): return mat_state.texture_grad def mat_get_material(): return mat_state.material def mat_get_material_users():", 
"curshader.add_include('std/math.glsl') curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj') return 'linearize(gl_FragCoord.z, cameraProj)' # View Distance", "vert out_disp = parse_displacement_input(node.inputs[2]) curshader.write('vec3 disp = {0};'.format(out_disp)) def parse_group(node,", "1 and node.uv_map == lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0", "and node.inputs[2].is_linked: parsed = {} parents = [] normal_parsed =", "ar2[0] post = ',' + ar2[1] else: co = ar[1][:-1]", "if t == 'RGB' or t == 'RGBA' or t", "* {1} - {0}.z * {2}, {0}.x * {2} +", "None and node.image.colorspace_settings.name == 'sRGB' res = '{0}.rgb'.format(texture_store(node, tex, tex_name,", "res, 0.1) return res elif node.type == 'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave) if", "* ((vec3(1.0) - {0}) * {1} * {0} + {0}", "'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v) elif node.type == 'COMBRGB': r = parse_value_input(node.inputs[0])", "out elif node.type == 'NORMAL': if socket == node.outputs[0]: return", "== node.outputs[3]: # True Normal return 'n' if curshader.shader_type ==", "elif node.type == 'BSDF_REFRACTION': # write_normal(node.inputs[3]) pass elif node.type ==", "parameter from the Texture node instead # if node.use_min: #", "global sample_bump_res global parsed tex_store = store_var_name(node) if is_parsed(tex_store): return", "return '{0}.b'.format(col) elif node.type == 'SEPXYZ': vec = parse_vector_input(node.inputs[0]) if", "res)) # Normal map already parsed, return elif l.from_node.type ==", "socket) parent = parents.pop() # Leaving group inp = parent.inputs[index]", "node.outputs[9]: # Transparent Depth return '0.0' elif socket == node.outputs[10]:", "ColorRamp return '1.0' elif node.type == 'MATH': val1 = parse_value_input(node.inputs[0])", "node_name(node.name) + '_i' curshader.write('int {0} = {1};'.format(index_var, index)) if interp", "# Vcols only for now return 'vcolor' else: # Vector", "= 
TBN * normalize(texn);') else: frag.write('vec3 n = ({0}) *", "= [ [-7.50343014e+02, 3.15679613e-04, 4.73464526e-01], [-1.00402363e+03, 1.29189794e-04, 9.08181524e-01], [-1.22075471e+03, 2.56245413e-05,", "with Armory PBR, connect Image Texture directly') parse_normal_map_color_input(node.inputs[5]) # Emission", "'RGB' or st == 'RGBA' or st == 'VECTOR': res", "col2, fac_var) # Revert to mix elif blend == 'HUE':", "store_var_name(node) # Pink color for missing texture parsed[tex_store] = True", "* {0}.y + {0}.z * {0}.z), 0.0)'.format(co) res = '(clamp({0},", "== node.outputs[4]: # Incoming return 'vVec' elif socket == node.outputs[5]:", "# Pass time till drivers are implemented if node.attribute_name ==", "if node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0: out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found", "parse_group(node, socket): # Entering group index = socket_index(node, socket) output_node", "global parsed tex_store = store_var_name(node) if is_parsed(tex_store): return tex_store parsed[tex_store]", "builds upon Cycles nodes work licensed as # Copyright 2011-2013", "pass elif node.type == 'SUBSURFACE_SCATTERING': if parse_surface: write_normal(node.inputs[4]) out_basecol =", "return '{0}.a'.format(store_var_name(node)) tex_name = safesrc(node.name) tex = make_texture(node, tex_name) tex_link", "0.1) return res elif node.type == 'TEX_POINTDENSITY': # Pass through", "store_var_name(node) # Pink color for missing texture curshader.write('vec4 {0} =", "'REROUTE': return parse_value_input(l.from_node.inputs[0]) res_var = write_result(l) st = l.from_socket.type if", "'bposition' col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) col3 = parse_vector_input(node.inputs[3])", "== 'On' else '0.0' elif socket == node.outputs[2]: # Lifetime", "elif op == 'SINE': out_val = 'sin({0})'.format(val1) elif op ==", "for now.. 
a = rotation[2] # x * cos(theta) -", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "parse_vector_input(node.inputs[2]) blend = node.blend_type if blend == 'MIX': out_col =", "return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif node.type == 'BUMP':", "License. # You may obtain a copy of the License", "res == None: return None curshader.write('float {0} = {1};'.format(res_var, res))", "parsed return s in parsed def res_var_name(node, socket): return node_name(node.name)", "{2}) * {3})'.format(\\ vector_curve(name + '0', vec + '.x', curves[0].points),", "{2}) + {2} * (vec3(1.0) - {1})) * (vec3(1.0) -", "= parse_value_input(node.inputs[4]) res = 'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co,", "def write_normal(inp): if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT': normal_res =", "== 'GROUP': return parse_group(node, socket) elif node.type == 'GROUP_INPUT': return", "'{0}.r'.format(col) elif socket == node.outputs[1]: return '{0}.g'.format(col) elif socket ==", "return arm.utils.safesrc(name) def get_sdk_path(): return arm.utils.get_sdk_path() def disp_enabled(): return arm.utils.disp_enabled(arm.make_state.target)", "[-1.18134453e+03, -2.18913373e-05, 1.30656109e+00], [-5.00279505e+02, -4.59745390e-06, 1.09090465e+00] ] blackbody_table_b = [", "'{0}.a'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co =", "= {0} - 0.0002;'.format(out_opacity)) # Volume # parse_volume_input(node.inputs[1]) # Displacement", "parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[4]) res =", "co, post, scl)) curshader.write('float {0}_4 = {1}{2} + vec3(0.0, {4},", "return s in parsed def res_var_name(node, socket): return node_name(node.name) +", "parse_vector_input(node.inputs[1]) curves = node.mapping.curves name = node_name(node.name) # mapping.curves[0].points[0].handle_type return", "!= 0.0 or 
location[1] != 0.0 or location[2] != 0.0:", "def texture_store(node, tex, tex_name, to_linear=False, tex_link=None): global sample_bump global sample_bump_res", "= parse_vector_input(node.inputs[15]) # transmission_roughness = parse_vector_input(node.inputs[16]) if node.inputs[17].is_linked or node.inputs[17].default_value[0]", "if image.packed_file is not None or not is_ascii(texfile): # Extract", "s = filepath.rsplit('.', 1) arm.assets.add(arm.utils.asset_path(s[0] + '.' + s[1].lower())) else:", "Get index fac_var = node_name(node.name) + '_fac' curshader.write('float {0} =", "blackbody_table_b[i] t_inv = 1.0 / t rgb[0] = r[0] *", "{1} - {0}.z * {2}, {0}.y * {2} + {0}.z", "co = 'bposition' grad = node.gradient_type if grad == 'LINEAR':", "{0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_2", "= output_node.inputs[index] parents.append(node) out_group = parse_input(inp) parents.pop() return out_group def", "node_name(node.name) # mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5},", "# Displacement if socket == node.outputs[1]: return parse_value_input(node.inputs[7]) else: return", "'COLOR': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert", "= '0.0' return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission", "mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): lays = mat_user.data.uv_layers #", "frag.write('opacity = {0} - 0.0002;'.format(out_opacity)) # Volume # parse_volume_input(node.inputs[1]) #", "== 'SPHERICAL': f = 'max(1.0 - sqrt({0}.x * {0}.x +", "out_val elif node.type == 'RGBTOBW': col = parse_vector_input(node.inputs[0]) return '((({0}.r", "node.inputs[1].is_linked: dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' if", "== node.outputs[2]: # Is Diffuse Ray return '1.0' elif socket", "if image.source == 'MOVIE': tex['source'] 
= 'movie' tex['min_filter'] = 'linear'", "math.sin(a)) # if node.rotation[0] != 0.0: # a = node.rotation[0]", "node.type == 'CAMERA': # View Vector in camera space return", "= res.split('(', 1) pre = ar[0] + '(' if ','", "2.0)'.format(vec1, vec2) elif op == 'DOT_PRODUCT': return 'vec3(dot({0}, {1}))'.format(vec1, vec2)", "= parse_value_input(node.inputs[18]) elif node.type == 'BSDF_DIFFUSE': if parse_surface: write_normal(node.inputs[2]) out_basecol", "parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) col3 = parse_vector_input(node.inputs[3]) scale = parse_value_input(node.inputs[4])", "# Incoming return 'vVec' elif socket == node.outputs[5]: # Parametric", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "get_sdk_path(): return arm.utils.get_sdk_path() def disp_enabled(): return arm.utils.disp_enabled(arm.make_state.target) def warn(text): arm.log.warn(text)", "{1} * 0.5)'.format(met1, met2) out_occlusion = '({0} * 0.5 +", "* 0.3 + {0}.g * 0.59 + {0}.b * 0.11)", "return i def node_name(s): for p in parents: s =", "blend == 'OVERLAY': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)", "+ g[1] * t + g[2] rgb[2] = ((b[0] *", "elif node.type == 'NORMAL': if socket == node.outputs[0]: return to_vec3(node.outputs[0].default_value)", "parse_vector_input(node.inputs[0]) elif node.type == 'COMBXYZ': x = parse_value_input(node.inputs[0]) y =", "!= None: curshader = tese else: curshader = vert out_disp", "node.type == 'GAMMA': out_col = parse_vector_input(node.inputs[0]) gamma = parse_value_input(node.inputs[1]) return", "elif node.type == 'CURVE_VEC': # Vector Curves fac = parse_value_input(node.inputs[0])", "required by applicable law or agreed to in writing, software", "node.type == 'NEW_GEOMETRY': if socket == node.outputs[0]: # Position return", "if socket == node.outputs[2]: # Object Index curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex')", "== 'MIX': out_col = 'mix({0}, {1}, 
{2})'.format(col1, col2, fac_var) elif", "i in range(0, len(elems)): curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i,", "= parse_value_input(node.inputs[0]) return '0.0' elif node.type == 'TEX_BRICK': curshader.add_function(c_functions.str_tex_brick) if", "def write_result(l): global parsed res_var = res_var_name(l.from_node, l.from_socket) # Unparsed", "scale = parse_value_input(node.inputs[1]) res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale) if", "== 'QUADRATIC': f = '0.0' elif grad == 'EASING': f", "parse_value_input(node.inputs[7]) # aniso = parse_vector_input(node.inputs[8]) # aniso_rot = parse_vector_input(node.inputs[9]) #", "agreed to in writing, software # distributed under the License", "= textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name)) sample_bump = False", "{} tex['name'] = tex_name image = image_node.image if matname is", "safesrc(s) if '__' in s: # Consecutive _ are reserved", "parse_value_input(node.inputs[1]) v = parse_value_input(node.inputs[2]) return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v) elif node.type", "+ s if curshader.write_textures > 0: s += '_texread' s", "1) if len(s) == 1: arm.log.warn(matname + '/' + image.name", "{3})'.format(\\ vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name +", "'AMBIENT_OCCLUSION': if parse_surface: # Single channel out_occlusion = parse_vector_input(node.inputs[0]) +", "'_fac_inv' curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac)) curshader.write('{0}float {1} =", "scl)) curshader.write('float {0}_4 = {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre,", "= arm.utils.extract_filename(filepath) tex['file'] = arm.utils.safestr(texfile) s = tex['file'].rsplit('.', 1) if", "res = 'tex_noise({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res,", "to assets # TODO: Khamake converts .PNG to .jpg? 
Convert", "= p.name + '_' + s if curshader.write_textures > 0:", "* (vec3(1.0) - {0}))));'.format(col1, col2, fac) elif blend == 'LINEAR_LIGHT':", "ext = s[1].lower() do_convert = ext not in ('jpg', 'png',", "== node.outputs[6]: # Is Transmission Ray return '0.0' elif socket", "= True frag.write_normal += 1 if not get_arm_export_tangents() or mat_get_material().arm_decal:", "0.0, {0}_fh1));'.format(sample_bump_res)) curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res)) res =", "con.add_elem('tex', 'short2norm') return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif socket", "= filepath.endswith(('.jpg', '.png', '.hdr')) if not has_ext: # Raw bytes,", "rgb[0] = 4.70366907 rgb[1] = 0.0 rgb[2] = 0.0 else:", "Normal elif node.type == 'OBJECT_INFO': return 'wposition' elif node.type ==", "else: uv_name = 'texCoord' triplanar = node.projection == 'BOX' if", "out_val = 'float({0} < {1})'.format(val1, val2) elif op == 'GREATER_THAN':", "1 to_linear = node.image != None and node.image.colorspace_settings.name == 'sRGB'", "{2})'.format(col1, col2, fac_var) elif blend == 'ADD': out_col = 'mix({0},", "2 elif(t >= 1167.0): i = 1 else: i =", "normalize(texn);') else: frag.write('vec3 n = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))", "else: co = 'bposition' scale = parse_value_input(node.inputs[4]) res = 'tex_brick_f({0}", "= 'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac_var) elif blend", "{2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) # if", "+= texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;') else: if mat_texture_grad(): curshader.write('vec4 {0}", "elif texfilter == 'Point': interpolation = 'Closest' # TODO: Blender", "return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_SKY': # Pass", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "elif node.type == 'VALTORGB': # ColorRamp return '1.0' elif node.type", "0.0: # a = node.rotation[0] # out = 'vec3({0}.y *", "elif op == 'GREATER_THAN': out_val = 'float({0} > {1})'.format(val1, val2)", "scl=0.001): global sample_bump global sample_bump_res sample_bump_res = store_var_name(node) + '_bump'", "global emission_found out_basecol = 'vec3(0.8)' out_roughness = '0.0' out_metallic =", "fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' fac_inv_var =", "socket): global particle_info global sample_bump global sample_bump_res # RGB if", "= '0.0' elif node.type == 'BSDF_GLOSSY': if parse_surface: write_normal(node.inputs[2]) out_basecol", "return 'vec3(0.0)' elif node.type == 'TANGENT': return 'wtangent' elif node.type", "parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type == 'AMBIENT_OCCLUSION': if parse_surface:", "governing permissions and # limitations under the License. # import", "global sample_bump_res con = _con vert = _vert frag =", "== 'ARCTANGENT': out_val = 'atan({0})'.format(val1) elif op == 'ARCTAN2': out_val", "if do_convert: unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked') if not", "- texCoord.y, 0.0)' elif node.type == 'RGB': if node.arm_material_param: nn", "Vector Curves fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves =", "1.0, 1.0);'.format(tex_store)) return '{0}.a'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if", "vVec)'.format(parse_vector_input(node.inputs[1])) else: dotnv = 'dotNV' if socket == node.outputs[0]: #", "parse_value_input(node.inputs[2]) curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr) elif node.type", "out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found = True # clearcoar_normal = parse_vector_input(node.inputs[20])", "# tex['format'] 
= image_format interpolation = image_node.interpolation rpdat = arm.utils.get_rp()", "tex_name tex['file'] = '' return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link))", "l.from_socket.type if st == 'RGB' or st == 'RGBA' or", "and is_pow(image.size[1]) if interpolation == 'Cubic': # Mipmap linear tex['mipmap_filter']", "+ '_i' curshader.write('int {0} = {1};'.format(index_var, index)) # Linear #", "may not use this file except in compliance with the", "'1.0' elif socket == node.outputs[4]: # Is Singular Ray return", "? 2.0 * {1} : 0.5 / (1.0 - {1})))'.format(dotnv,", "node.outputs[5]: # Parametric return 'mposition' elif node.type == 'HAIR_INFO': return", "'GREATER_THAN': out_val = 'float({0} > {1})'.format(val1, val2) elif op ==", "+ 1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var) elif node.type", "1)) == 0) and num != 0 def is_ascii(s): return", "# Color parse_normal_map_color_input(node.inputs[1], node.inputs[0]) return None elif node.type == 'VECT_TRANSFORM':", "node.node_tree.name.startswith('Armory PBR'): # Displacement if socket == node.outputs[1]: return parse_value_input(node.inputs[7])", "op == 'ARCCOSINE': out_val = 'acos({0})'.format(val1) elif op == 'ARCTANGENT':", "_frag geom = _geom tesc = _tesc tese = _tese", "parse_value_input(strength_input) if strength != '1.0': frag.write('n.xy *= {0};'.format(strength)) frag.write('n =", "# # Licensed under the Apache License, Version 2.0 (the", "tesc = _tesc tese = _tese parse_surface = _parse_surface parse_opacity", "curshader.write('float {0}_4 = textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name)) sample_bump", "f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y *", "= parse_value_input(node.inputs[0]) interp = node.color_ramp.interpolation elems = node.color_ramp.elements if len(elems)", "vector_curve(name + '2', vec + '.z', curves[2].points), fac) elif node.type", "textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, 
uv_name)) curshader.write('float {0}_2 = textureOffset({1},", "== 'RGB' or t == 'RGBA' or t == 'VECTOR':", "{0}.x * {2} + ({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))", "if socket == node.outputs[1]: curshader.add_include('std/math.glsl') curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj') return 'linearize(gl_FragCoord.z,", "parse_vector_input(node.inputs[0]) + '.r' elif node.type == 'BSDF_ANISOTROPIC': if parse_surface: write_normal(node.inputs[4])", "rough2, fac_var, fac_inv_var) out_metallic = '({0} * {3} + {1}", "= parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.x'.format(vec) elif socket", "parse_shader_input(inp) elif inp.type == 'RGB': return parse_vector_input(inp) elif inp.type ==", "res_var = write_result(l) st = l.from_socket.type if st == 'RGB'", "parents global normal_parsed global curshader # Active shader - frag", "strength strength = parse_value_input(node.inputs[0]) # Height multiplier # distance =", "len(points)): index += ' + ({0} > {1} ? 
1", "elif blend == 'DARKEN': out_col = 'min({0}, {1} * {2})'.format(col1,", "0.5)'.format(spec1, spec2) out_emission = '({0} * 0.5 + {1} *", "= {0};'.format(out_metallic)) frag.write('occlusion = {0};'.format(out_occlusion)) frag.write('specular = {0};'.format(out_specular)) if '_Emission'", "texture tex = {} tex['name'] = tex_name tex['file'] = ''", "= parse_input(inp) parents.append(parent) # Return to group return res def", "col2, fac_var) elif blend == 'SUBTRACT': out_col = 'mix({0}, {0}", "+ {1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var) out_specular = '({0}", "- {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var) def write_normal(inp): if inp.is_linked", "' + uname) return uname def store_var_name(node): return node_name(node.name) +", "'TEX_NOISE': curshader.add_function(c_functions.str_tex_noise) assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png') assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256',", "Ray return '0.0' elif socket == node.outputs[2]: # Is Diffuse", "= 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y", "mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): lays = mat_user.data.uv_layers # Second uvmap", "s[1].lower())) else: arm.assets.add(arm.utils.asset_path(filepath)) # if image_format != 'RGBA32': # tex['format']", "'ABSOLUTE': out_val = 'abs({0})'.format(val1) elif op == 'MINIMUM': out_val =", "or not is_ascii(texfile): # Extract packed data / copy non-ascii", "texture curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) return '{0}.a'.format(tex_store)", "= 'vec3({0}.x * {1} - ({0}.y) * {2}, {0}.x *", "if sample_bump: write_bump(node, res) return res elif node.type == 'TEX_IMAGE':", "co = parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[3])", "else: # VALUE return res_var else: if mat_batch() and inp.is_uniform:", "'REROUTE': return parse_vector_input(l.from_node.inputs[0]) res_var = write_result(l) st = 
l.from_socket.type if", "1.0);'.format(tex_store)) curshader.write_textures -= 1 return '{0}.rgb'.format(tex_store) elif node.type == 'TEX_MAGIC':", "{1})'.format(ior, dotnv) elif node.type == 'NEW_GEOMETRY': if socket == node.outputs[6]:", "+ {1})'.format(val1, val2) elif op == 'SUBTRACT': out_val = '({0}", "met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1]) if parse_surface: out_basecol", "parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[1]) if node.coloring", "curshader.write(f'if (texCoordBlend.y > 0) {tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;')", "/ copy non-ascii texture unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')", "return str(v) def to_vec3(v): return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2])", "if ',' in ar[1]: ar2 = ar[1].split(',', 1) co =", "frag.write('n = normalize(TBN * n);') con.add_elem('tang', 'short4norm') frag.write_normal -= 1", "Leaving group inp = parent.inputs[index] res = parse_input(inp) parents.append(parent) #", "'2', vec + '.z', curves[2].points), fac,\\ vector_curve(name + '3a', vec", "texn = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) frag.write('texn.y = -texn.y;')", "if size is different or file does not exist yet", "1) arm.assets.add(arm.utils.asset_path(s[0] + '.' 
+ s[1].lower())) else: arm.assets.add(arm.utils.asset_path(filepath)) # if", "parents: s = p.name + '_' + s if curshader.write_textures", "return res elif node.type == 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked: co", "'(1.0 - float(gl_FrontFacing))' elif socket == node.outputs[7]: # Pointiness return", "'.z', curves[2].points), fac,\\ vector_curve(name + '3a', vec + '.x', curves[3].points),", "# Revert to mix elif blend == 'BURN': out_col =", "write_bump(node, res, 0.1) return res elif node.type == 'TEX_POINTDENSITY': return", "index)) # Linear # Write Xs array facs_var = name", "= {1};'.format(index_var, index)) if interp == 'CONSTANT': return '{0}[{1}]'.format(cols_var, index_var)", "# Unlinked reroute return to_vec3([0.0, 0.0, 0.0]) else: if mat_batch()", "- {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_VELVET': if parse_surface: write_normal(node.inputs[2]) out_basecol", "texfilter == 'Point': interpolation = 'Closest' # TODO: Blender seems", "'PARTICLE_INFO': if socket == node.outputs[0]: # Index particle_info['index'] = True", "vec + '.z', curves[2].points), fac,\\ vector_curve(name + '3a', vec +", "= '({0} * 0.5 + {1} * 0.5)'.format(spec1, spec2) out_emission", "global tese global parse_surface global parse_opacity global basecol_only global emission_found", "file extension required for image name') return None ext =", "vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl)) sample_bump = False", "parse_value_input(node.inputs[3]) res = 'tex_checker_f({0}, {1})'.format(co, scale) if sample_bump: write_bump(node, res)", "'{0}.rgb'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co =", "'REROUTE': return parse_displacement_input(l.from_node.inputs[0]) return parse_vector_input(inp) else: return None def parse_vector_input(inp):", "def disp_enabled(): return 
arm.utils.disp_enabled(arm.make_state.target) def warn(text): arm.log.warn(text) def assets_add(path): arm.assets.add(path)", "'compiled', 'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) converted_path = os.path.join(unpack_path,", "'BSDF_GLASS': if parse_surface: write_normal(node.inputs[3]) out_roughness = parse_value_input(node.inputs[1]) if parse_opacity: out_opacity", "{3} + {1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var) elif node.type", "if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket == node.outputs[1]:", "out_roughness = '({0} * {3} + {1} * {2})'.format(rough1, rough2,", "index_var, fac_var, facs_var) elif node.type == 'CURVE_VEC': # Vector Curves", "facs_var) def write_normal(inp): if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT': normal_res", "+ {1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var) out_roughness = '({0}", "= '({0} + {2} * (2.0 * ({1} - vec3(0.5))))'.format(col1,", "inp.type == 'RGBA': return parse_vector_input(inp) elif inp.type == 'VECTOR': return", "'2', vec + '.z', curves[2].points), fac) elif node.type == 'CURVE_RGB':", "Generated - bounds return 'bposition' elif socket == node.outputs[1]: #", "fac) elif node.type == 'INVERT': fac = parse_value_input(node.inputs[0]) out_col =", "'cross({0}, {1})'.format(vec1, vec2) elif op == 'NORMALIZE': return 'normalize({0})'.format(vec1) elif", "0.33), tex_noise({0} * {1} + 0.66))'.format(co, scale) if sample_bump: write_bump(node,", "!= None and mat in mat_users: mat_user = mat_users[mat][0] if", "color out_basecol = parse_vector_input(node.inputs[0]) # Occlusion out_occlusion = parse_value_input(node.inputs[2]) #", "return 'cross({0}, {1})'.format(vec1, vec2) elif op == 'NORMALIZE': return 'normalize({0})'.format(vec1)", "# Extract packed data / copy non-ascii texture unpack_path =", "= 'pow({0}, {1})'.format(val1, val2) elif op == 'LOGARITHM': out_val =", "arm.log import arm.material.mat_state as mat_state import 
arm.material.cycles_functions as c_functions import", "scale[0] != 1.0 or scale[1] != 1.0 or scale[2] !=", "frag.write('emission = {0};'.format(out_emission)) if parse_opacity: frag.write('opacity = {0} - 0.0002;'.format(out_opacity))", "in writing, software # distributed under the License is distributed", "node.outputs[4]: # Random curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom') return 'objectInfoRandom' elif node.type", "[2.52432244e+03, -1.06185848e-03, 3.11067539e+00], [3.37763626e+03, -4.34581697e-04, 1.64843306e+00], [4.10671449e+03, -8.61949938e-05, 6.41423749e-01], [4.66849800e+03,", "for i in range(0, len(node.outputs)): if node.outputs[i] == socket: return", "== 'CURVE_RGB': # RGB Curves fac = parse_value_input(node.inputs[0]) vec =", "res, 0.1) return res elif node.type == 'TEX_MUSGRAVE': # Fall", "frag, geom, tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only) def parse_output(node,", "bpy import os import arm.assets import arm.utils import arm.make_state import", "0.1) return res elif node.type == 'TEX_MUSGRAVE': # Fall back", "== 'SCREEN': out_col = '(vec3(1.0) - (vec3(1.0 - {2}) +", "== 'VALUE': res = parse_value(l.from_node, l.from_socket) if res == None:", "# Extend or clip tex['u_addressing'] = 'clamp' tex['v_addressing'] = 'clamp'", "'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale) if sample_bump: write_bump(node, res)", "{3}))'.format(out, node.max[0], node.max[1]) return out elif node.type == 'NORMAL': if", "# TODO: is parse_value path preferred? 
nor = parse_vector_input(node.inputs[0]) return", "= node.object #instance = node.from_instance if socket == node.outputs[0]: #", "not os.path.isfile(converted_path): fmt = 'PNG' if new_ext == 'png' else", "socket == node.outputs[3]: # Material Index curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex') return", "= parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[3]) res", "to group return res def parse_input(inp): if inp.type == 'SHADER':", "/ {1}))'.format(col1, col2, fac_var) elif blend == 'DIFFERENCE': out_col =", "socket == node.outputs[0]: # Position return 'wposition' elif socket ==", "else: if mat_batch() and inp.is_uniform: return to_uniform(inp) else: return to_vec1(inp.default_value)", "parse_value_input(node.inputs[1]) if node.coloring == 'INTENSITY': res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co,", "# sheen_tint = parse_vector_input(node.inputs[11]) # clearcoat = parse_vector_input(node.inputs[12]) # clearcoat_rough", "parse_vector_input(node.inputs[0]) else: co = 'bposition' scale = parse_value_input(node.inputs[3]) res =", "')' curshader.write('float {0}_1 = {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "fac) elif node.type == 'MIX_RGB': fac = parse_value_input(node.inputs[0]) fac_var =", "True mat_bind_texture(tex) con.add_elem('tex', 'short2norm') curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link) if node.inputs[0].is_linked: uv_name", "{2}) * vec3({4}, {5}, {6})) * {3})'.format(\\ vector_curve(name + '0',", "op == 'ROUND': # out_val = 'round({0})'.format(val1) out_val = 'floor({0}", "node.outputs[1]: return '{0}.g'.format(col) elif socket == node.outputs[2]: return '{0}.b'.format(col) elif", "uv_name)) sample_bump = False if to_linear: curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store))", "parsed = {} parents = [] 
normal_parsed = False rpdat", "socket == node.outputs[5]: # Parametric return 'mposition' elif node.type ==", "matrix frag.write('vec3 texn = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) frag.write('texn.y", "const for i in range(0, len(elems)): curshader.write('{0}[{1}] = vec3({2}, {3},", "mix elif blend == 'SOFT_LIGHT': out_col = '((1.0 - {2})", "t_inv + g[1] * t + g[2] rgb[2] = ((b[0]", "op == 'NORMALIZE': return 'normalize({0})'.format(vec1) elif node.type == 'DISPLACEMENT': height", "pass return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission def", "tex['name'] = tex_name image = image_node.image if matname is None:", "elif socket == node.outputs[3]: # Material Index curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex')", "{2} * (vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1, col2,", "global sample_bump if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): #", "col3 = parse_vector_input(node.inputs[3]) scale = parse_value_input(node.inputs[4]) res = 'tex_brick({0} *", "'atan({0})'.format(val1) elif op == 'ARCTAN2': out_val = 'atan({0}, {1})'.format(val1, val2)", "mat_name(): return mat_state.material.name def mat_batch(): return mat_state.batch def mat_bind_texture(tex): mat_state.bind_textures.append(tex)", "{3}))'.format(out, location[0], location[1], location[2]) # use Extension parameter from the", "-7.30646033e-01], [6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01] ] if (t >= 12000):", "inp.type == 'VALUE': # Unlinked reroute return to_vec3([0.0, 0.0, 0.0])", "scale[2]) if rotation[2] != 0.0: # ZYX rotation, Z axis", "Write Xs array facs_var = name + '_xs' curshader.write('float {0}[{1}];'.format(facs_var,", "None elif node.type == 'VECT_TRANSFORM': #type = node.vector_type #conv_from =", "'({0} * {1})'.format(val1, val2) elif op == 'DIVIDE': out_val =", "socket == node.outputs[4]: # Random curshader.add_uniform('float 
objectInfoRandom', link='_objectInfoRandom') return 'objectInfoRandom'", "= parse_value_input(node.inputs[1]) out_basecol = '({0} * {1})'.format(out_basecol, emission_strength) elif node.type", "elif socket == node.outputs[5]: # Window return 'vec3(0.0)' # 'wvpposition'", "in range(1, len(elems)): index += ' + ({0} > {1}", "[-1.00402363e+03, 1.29189794e-04, 9.08181524e-01], [-1.22075471e+03, 2.56245413e-05, 1.20753416e+00], [-1.42546105e+03, -4.01730887e-05, 1.44002695e+00], [-1.18134453e+03,", "'BOX' if triplanar: curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1 =", "mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'): lays = mat_user.data.uv_layers", "op = node.operation if op == 'ADD': return '({0} +", "- {0}))));'.format(col1, col2, fac) elif blend == 'LINEAR_LIGHT': out_col =", "# Unless required by applicable law or agreed to in", "= name + '_i' curshader.write('int {0} = {1};'.format(index_var, index)) #", "True if parse_opacity: out_opacity = parse_value_input(node.inputs[1]) else: return parse_group(node, socket)", "# write_normal(node.inputs[3]) pass elif node.type == 'BSDF_TRANSLUCENT': if parse_surface: write_normal(node.inputs[1])", "parsed, return elif l.from_node.type == 'NORMAL_MAP': return None return res_var", "safesrc(name): return arm.utils.safesrc(name) def get_sdk_path(): return arm.utils.get_sdk_path() def disp_enabled(): return", "= 'bposition' col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) scale =", "= 'Linear' elif texfilter == 'Point': interpolation = 'Closest' #", "= '' if node.inputs[0].is_linked else 'const ' fac = parse_value_input(node.inputs[0])", "elif socket == node.outputs[8]: # Ray Depth return '0.0' elif", "{} particle_info['index'] = False particle_info['age'] = False particle_info['lifetime'] = False", "the Apache License, Version 2.0 (the \"License\"); # you may", "{2})'.format(spec1, spec2, fac_var, fac_inv_var) out_emission = '({0} * {3} +", 
"blend == 'MULTIPLY': out_col = 'mix({0}, {0} * {1}, {2})'.format(col1,", "curshader.add_function(c_functions.str_hue_sat) h = parse_value_input(node.inputs[0]) s = parse_value_input(node.inputs[1]) v = parse_value_input(node.inputs[2])", "= '0.0' out_metallic = '0.0' out_occlusion = '1.0' out_specular =", "parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic = '1.0' elif node.type ==", "res) return res elif node.type == 'TEX_WAVE': curshader.add_function(c_functions.str_tex_wave) if node.inputs[0].is_linked:", "'TEX_POINTDENSITY': # Pass through return to_vec3([0.0, 0.0, 0.0]) elif node.type", "return None curshader.write('vec3 {0} = {1};'.format(res_var, res)) elif st ==", "elif node.type == 'ATTRIBUTE': if socket == node.outputs[0]: # Color", "'OUTPUT_MATERIAL') if output_node != None: parse_output(output_node, con, vert, frag, geom,", "3 elif(t >= 1449.0): i = 2 elif(t >= 1167.0):", "normal_parsed global frag if basecol_only: return if inp.is_linked == False:", "return 'n' elif socket == node.outputs[2]: # UV con.add_elem('tex', 'short2norm')", "bright, contr) elif node.type == 'GAMMA': out_col = parse_vector_input(node.inputs[0]) gamma", "3.11067539e+00], [3.37763626e+03, -4.34581697e-04, 1.64843306e+00], [4.10671449e+03, -8.61949938e-05, 6.41423749e-01], [4.66849800e+03, 2.85655028e-05, 1.29075375e-01],", "'tex_magic({0} * {1} * 4.0)'.format(co, scale) if sample_bump: write_bump(node, res,", "'SEPXYZ': vec = parse_vector_input(node.inputs[0]) if socket == node.outputs[0]: return '{0}.x'.format(vec)", "'_i' curshader.write('int {0} = {1};'.format(index_var, index)) # Linear # Write", "node.type == 'ADD_SHADER': bc1, rough1, met1, occ1, spec1, opac1, emi1", "{1} + 0.66))'.format(co, scale) if sample_bump: write_bump(node, res, 0.1) return", "nor) elif node.type == 'NORMAL_MAP': if curshader == tese: return", "= node.inputs['Rotation'].default_value location = node.inputs['Location'].default_value if 
node.inputs['Location'].enabled else [0.0, 0.0,", "def is_pow(num): return ((num & (num - 1)) == 0)", "== 'EASING': f = '0.0' elif grad == 'DIAGONAL': f", "def mat_name(): return mat_state.material.name def mat_batch(): return mat_state.batch def mat_bind_texture(tex):", "'1.0' out_opacity = '1.0' out_emission = '0.0' if node.type ==", "export def parse(nodes, con, vert, frag, geom, tesc, tese, parse_surface=True,", "= 'dotNV' return 'fresnel({0}, {1})'.format(ior, dotnv) elif node.type == 'NEW_GEOMETRY':", "'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co, col1, col2, col3, scale)", "= node.space #map = node.uv_map # Color parse_normal_map_color_input(node.inputs[1], node.inputs[0]) return", "Revert to mix elif blend == 'DODGE': out_col = 'mix({0},", "global basecol_only global emission_found global particle_info global sample_bump global sample_bump_res", "== node.outputs[2]: # Object Index curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex') return 'objectInfoIndex'", "node_name(node.name) + '_facs' curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const", "'HAIR_INFO': return 'vec3(0.0)' # Tangent Normal elif node.type == 'OBJECT_INFO':", "occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[2]) if parse_surface: out_basecol =", "op == 'POWER': out_val = 'pow({0}, {1})'.format(val1, val2) elif op", "# subsurface_radius = parse_vector_input(node.inputs[2]) # subsurface_color = parse_vector_input(node.inputs[3]) out_metallic =", "nor = parse_vector_input(node.inputs[0]) return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type ==", "to_vec1(inp.default_value) def parse_value(node, socket): global particle_info global sample_bump if node.type", "node.outputs[2]: # Is Diffuse Ray return '1.0' elif socket ==", "-1.55078698e-08, 3.81675160e-04, -7.30646033e-01], [6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01] ] if (t", "'ceil({0})'.format(val1) elif op == 'FRACT': 
out_val = 'fract({0})'.format(val1) elif op", "+= 1 res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link)) curshader.write_textures -=", "points[i].location[0]) # Write index index_var = name + '_i' curshader.write('int", "pow({0}, ({1} < 0.5) ? 2.0 * {1} : 0.5", "elif node.type == 'OBJECT_INFO': if socket == node.outputs[2]: # Object", "= {2};'.format(prefix, fac_var, fac)) curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix,", "'({0} * 0.5 + {1} * 0.5)'.format(emi1, emi2) if parse_opacity:", "({0} > {1} ? 1 : 0)'.format(fac_var, points[i].location[0]) # Write", "= safesrc(inp.node.name) + safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type) + ' ' + uname)", "return 'wposition' elif socket == node.outputs[1]: # Normal return 'n'", "out_opacity = '({0} * 0.5 + {1} * 0.5)'.format(opac1, opac2)", "'({0} * {3} + {1} * {2})'.format(met1, met2, fac_var, fac_inv_var)", "socket == node.outputs[0]: # Fresnel curshader.add_function(c_functions.str_fresnel) return 'fresnel(1.0 / (1.0", "# Transparent Depth return '0.0' elif socket == node.outputs[10]: #", "socket) elif node.type == 'MIX_SHADER': prefix = '' if node.inputs[0].is_linked", "node.type == 'BUMP': # Interpolation strength strength = parse_value_input(node.inputs[0]) #", "particle_info['index'] = True return 'p_index' if arm.utils.get_rp().arm_particles == 'On' else", "# Age particle_info['age'] = True return 'p_age' if arm.utils.get_rp().arm_particles ==", "= 'sin({0})'.format(val1) elif op == 'COSINE': out_val = 'cos({0})'.format(val1) elif", "fac_var, facs_var) def write_normal(inp): if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT':", "'mix({0}, {1}, {2})'.format(col1, col2, fac_var) elif blend == 'ADD': out_col", "parse_value_input(node.inputs[1]) res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale) if sample_bump: write_bump(node,", "parse_value_input(node.inputs[1]) else: return parse_group(node, socket) elif node.type == 'GROUP_INPUT': return", "= 
parse_vector_input(node.inputs[0]) # subsurface = parse_vector_input(node.inputs[1]) # subsurface_radius = parse_vector_input(node.inputs[2])", "out_col = 'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac_var) elif", "None else: return parse_group(node, socket) elif node.type == 'GROUP_INPUT': return", "{1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl)) curshader.write('float", "None if tex != None: curshader.write_textures += 1 res =", "UV con.add_elem('tex', 'short2norm') return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif", "bpy.data.worlds['Arm'] # Surface if parse_surface or parse_opacity: parsed = {}", "+ {1}, {2})'.format(col1, col2, fac_var) elif blend == 'MULTIPLY': out_col", "= '((1.0 - {2}) * {0} + {2} * ((vec3(1.0)", "elif op == 'COSINE': out_val = 'cos({0})'.format(val1) elif op ==", "{0} + {1}, {2})'.format(col1, col2, fac_var) elif blend == 'MULTIPLY':", "if image_format != 'RGBA32': # tex['format'] = image_format interpolation =", "== node.outputs[0]: # Fresnel curshader.add_function(c_functions.str_fresnel) return 'fresnel(1.0 / (1.0 -", "== 'BSDF_DIFFUSE': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness =", "interpolation = image_node.interpolation rpdat = arm.utils.get_rp() texfilter = rpdat.arm_texture_filter if", "tex != None: curshader.write_textures += 1 res = '{0}.a'.format(texture_store(node, tex,", "parse_value_input(node.inputs[1]) b = parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(r, g, b)", "co = 'bposition' scale = parse_value_input(node.inputs[1]) res = 'tex_wave_f({0} *", "for missing texture curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))", "== 'ARCSINE': out_val = 'asin({0})'.format(val1) elif op == 'ARCCOSINE': out_val", "parts.. 
ar = res.split('(', 1) pre = ar[0] + '('", "# Write index index_var = node_name(node.name) + '_i' curshader.write('int {0}", "# Ray Length return '0.0' elif socket == node.outputs[8]: #", "= 'tex_brick_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res) return", "out_basecol = '({0} * {1})'.format(out_basecol, emission_strength) elif node.type == 'BSDF_GLASS':", "0.0 else: if (t >= 6365.0): i = 5 elif(t", "# # This module builds upon Cycles nodes work licensed", "+ {1} * {2})'.format(met1, met2, fac_var, fac_inv_var) out_occlusion = '({0}", "frag.write('n.xy *= {0};'.format(strength)) frag.write('n = normalize(TBN * n);') con.add_elem('tang', 'short4norm')", "Reflection Ray return '0.0' elif socket == node.outputs[6]: # Is", "'SOFT_LIGHT': out_col = '((1.0 - {2}) * {0} + {2}", "'OVERLAY': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert", "elif node.type == 'TEX_POINTDENSITY': # Pass through return to_vec3([0.0, 0.0,", "== 'NORMAL_MAP': if curshader == tese: return parse_vector_input(node.inputs[1]) else: #space", "camera space return 'vVecCam' elif node.type == 'NEW_GEOMETRY': if socket", "def to_vec3(v): return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2]) def node_by_type(nodes,", "0.0: # a = node.rotation[1] # out = 'vec3({0}.x *", "to_uniform(inp) else: return to_vec1(inp.default_value) def parse_value(node, socket): global particle_info global", "== node.outputs[3]: # Object return 'mposition' elif socket == node.outputs[4]:", "'clamp({0}, 0.0, 1.0)'.format(out_val) else: return out_val elif node.type == 'RGBTOBW':", "tex_link = node.name if node.arm_material_param else None if tex !=", "False sample_bump = False sample_bump_res = '' wrd = bpy.data.worlds['Arm']", "'_Emission' in wrd.world_defs: frag.write('emission = {0};'.format(out_emission)) if parse_opacity: frag.write('opacity =", "curves[3].points), vector_curve(name + '3c', vec + '.z', curves[3].points)) elif node.type", "use Extension parameter from the Texture 
node instead # if", "if blend == 'MIX': out_col = 'mix({0}, {1}, {2})'.format(col1, col2,", "== len(s.encode()) ## def get_rp_renderer(): return arm.utils.get_rp().rp_renderer def get_arm_export_tangents(): return", "= parse_vector_input(node.inputs[20]) # tangent = parse_vector_input(node.inputs[21]) if parse_opacity: if len(node.inputs)", "else: co = 'bposition' grad = node.gradient_type if grad ==", "if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket == node.outputs[2]:", "and mat in mat_users: mat_user = mat_users[mat][0] if hasattr(mat_user.data, 'uv_layers'):", "out_occlusion = '({0} * 0.5 + {1} * 0.5)'.format(occ1, occ2)", "= parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(r, g, b) elif node.type", "node.outputs[5]: # Is Reflection Ray return '0.0' elif socket ==", "as c_functions import shutil emission_found = False particle_info = None", "elif socket == node.outputs[2]: # UV con.add_elem('tex', 'short2norm') return 'vec3(texCoord.x,", "tex_link=tex_link)) else: global parsed tex_store = store_var_name(node) # Pink color", "is not None or not is_ascii(texfile): # Extract packed data", "{2})'.format(col1, col2, fac_var) # Revert to mix # out_col =", "return '1.0' elif node.type == 'MATH': val1 = parse_value_input(node.inputs[0]) val2", "# mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6}))", "return parse_vector_input(node.inputs[0]) elif node.type == 'COMBXYZ': x = parse_value_input(node.inputs[0]) y", "Consecutive _ are reserved s = s.replace('_', '_x') return s", "len(points)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0])) # Map vector return", "node.outputs[6]: # Backfacing return '(1.0 - float(gl_FrontFacing))' elif socket ==", "if output_node == None: return inp = output_node.inputs[index] parents.append(node) out_group", "{6})) * {3})'.format(\\ vector_curve(name + '0', vec + '.x', curves[0].points),", "node.type == 'NEW_GEOMETRY': if 
socket == node.outputs[6]: # Backfacing return", "if socket == node.outputs[3]: # Location particle_info['location'] = True return", "curshader.add_uniform('vec3 eye', link='_cameraPosition') return 'distance(eye, wposition)' elif node.type == 'FRESNEL':", "* 2.0 - 1.0;'.format(parse_vector_input(inp))) frag.write('texn.y = -texn.y;') frag.add_include('std/normals.glsl') frag.write('mat3 TBN", "write_normal(inp): if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT': normal_res = parse_vector_input(inp)", "node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else:", "if normal_parsed: return normal_parsed = True frag.write_normal += 1 if", "res) return res elif node.type == 'LIGHT_FALLOFF': # Constant, linear,", "if '__' in s: # Consecutive _ are reserved s", "'Linear': interpolation = 'Linear' elif texfilter == 'Point': interpolation =", "rotation = node.inputs['Rotation'].default_value location = node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0,", "tex = make_texture(node, tex_name) tex_link = node.name if node.arm_material_param else", "vector return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) *", "tex['name'] = tex_name tex['file'] = '' return '{0}.a'.format(texture_store(node, tex, tex_name,", "return parse_group_input(node, socket) elif node.type == 'MIX_SHADER': prefix = ''", "{1} * 0.5)'.format(opac1, opac2) elif node.type == 'BSDF_PRINCIPLED': if parse_surface:", "node.type == 'BSDF_HAIR': pass elif node.type == 'HOLDOUT': if parse_surface:", "out_val = 'floor({0})'.format(val1) elif op == 'CEIL': out_val = 'ceil({0})'.format(val1)", "return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac) elif node.type ==", "if node.use_min: # out = 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0],", "res)) elif st == 'VALUE': res = parse_value(l.from_node, l.from_socket) if", "return 'wposition' elif node.type == 
'PARTICLE_INFO': if socket == node.outputs[3]:", "return 'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend, dotnv) elif socket", "= [] normal_parsed = False rpdat = arm.utils.get_rp() if rpdat.arm_rp_displacement", "= {1};'.format(index_var, index)) # Linear # Write Xs array facs_var", "- frag for surface / tese for displacement global con", "= 'min({0}, {1} * {2})'.format(col1, col2, fac_var) elif blend ==", "True Normal return 'n' if curshader.shader_type == 'frag' else 'wnormal'", "write_normal(node.inputs[4]) # Revert to glossy out_basecol = parse_vector_input(node.inputs[0]) out_roughness =", "{1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var) out_metallic = '({0} *", "return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif socket == node.outputs[3]:", "fac)) index = '0' for i in range(1, len(points)): index", "if not os.path.isfile(arm.utils.asset_path(filepath)): arm.log.warn('Material ' + matname + '/' +", "curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name +", "= '' else: res = 'n' return res elif node.type", "rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1]) bc2, rough2,", "{0}[{1}];'.format(facs_var, len(points))) # TODO: Make const for i in range(0,", "not is_ascii(texfile): # Extract packed data / copy non-ascii texture", "image.name has_ext = filepath.endswith(('.jpg', '.png', '.hdr')) if not has_ext: #", "reserved s = s.replace('_', '_x') return s ## def make_texture(image_node,", "basecol_only) def parse_output(node, _con, _vert, _frag, _geom, _tesc, _tese, _parse_surface,", "vector_curve(name + '3c', vec + '.z', curves[3].points)) elif node.type ==", "- 450.0) / 150.0)'.format(wl) # Vector elif node.type == 'CAMERA':", "'NORMAL': nor = parse_vector_input(node.inputs[0]) return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor) elif node.type", "{2}, {3}, 1.0-{4}))'.format(col, hue, sat, val, fac) elif node.type ==", "# subsurface = 
parse_vector_input(node.inputs[1]) # subsurface_radius = parse_vector_input(node.inputs[2]) # subsurface_color", "elif node.type == 'DISPLACEMENT': height = parse_value_input(node.inputs[0]) midlevel = parse_value_input(node.inputs[1])", "node.type == 'GROUP': return parse_group(node, socket) elif node.type == 'GROUP_INPUT':", "if new_ext == 'png' else 'JPEG' arm.utils.convert_image(image, unpack_filepath, file_format=fmt) else:", "= node.rotation[0] # out = 'vec3({0}.y * {1} - {0}.z", "# out_val = 'float({0} % {1})'.format(val1, val2) out_val = 'mod({0},", "= parse_vector_input(node.inputs[2]) scale = parse_value_input(node.inputs[3]) res = 'tex_checker({0}, {1}, {2},", "'DIVIDE': out_val = '({0} / {1})'.format(val1, val2) elif op ==", "if socket == node.outputs[1]: return parse_value_input(node.inputs[7]) else: return None else:", "pre, co, post, scl)) curshader.write('float {0}_3 = {1}{2} + vec3(0.0,", "uvlayers for Curve lays = mat_user.data.uv_layers # Second uvmap referenced", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "res) return res elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co", "* 0.5 + {1} * 0.5)'.format(rough1, rough2) out_metallic = '({0}", "return '0.0' elif node.type == 'CAMERA': # View Z Depth", "= parse_vector_input(node.inputs[2]) # subsurface_color = parse_vector_input(node.inputs[3]) out_metallic = parse_value_input(node.inputs[4]) out_specular", "return to_uniform(inp) else: return to_vec1(inp.default_value) def parse_value(node, socket): global particle_info", "fac_var) # Revert to mix elif blend == 'SOFT_LIGHT': out_col", "'wtangent' elif node.type == 'TEX_COORD': #obj = node.object #instance =", "== 'LIGHTEN': out_col = 'max({0}, {1} * {2})'.format(col1, col2, fac_var)", "node.type == 'TEX_ENVIRONMENT': # Pass through return to_vec3([0.0, 0.0, 0.0])", "return res elif node.type == 'TEX_IMAGE': # Already fetched if", "blend == 'LIGHTEN': out_col = 'max({0}, {1} * {2})'.format(col1, col2,", "[ [0.0, 
0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0,", "if len(s) == 1: arm.log.warn(matname + '/' + image.name +", "# Active shader - frag for surface / tese for", "{3} + {1} * {2})'.format(spec1, spec2, fac_var, fac_inv_var) out_emission =", "'COMBHSV': curshader.add_function(c_functions.str_hue_sat) h = parse_value_input(node.inputs[0]) s = parse_value_input(node.inputs[1]) v =", "* 0.5 + {1} * 0.5)'.format(spec1, spec2) out_emission = '({0}", "- 0.0002;'.format(out_opacity)) # Volume # parse_volume_input(node.inputs[1]) # Displacement if _parse_displacement", "= parse_value_input(node.inputs[4]) # Normal if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP':", "'pow({0}, {1})'.format(val1, val2) elif op == 'LOGARITHM': out_val = 'log({0})'.format(val1)", "node.type == 'WAVELENGTH': curshader.add_function(c_functions.str_wavelength_to_rgb) wl = parse_value_input(node.inputs[0]) # Roughly map", "'INTENSITY': res = 'tex_voronoi({0} * {1}).a'.format(co, scale) else: # CELLS", "== 'FRACT': out_val = 'fract({0})'.format(val1) elif op == 'MODULO': #", "node.type == 'EMISSION': if parse_surface: # Multiply basecol out_basecol =", "0.994478524 rgb[2] = 1.56626022 elif (t < 965.0): rgb[0] =", "f = '0.0' elif grad == 'EASING': f = '0.0'", "Parametric return 'mposition' elif node.type == 'HAIR_INFO': return 'vec3(0.0)' #", "mat_state.bind_textures.append(tex) def mat_texture_grad(): return mat_state.texture_grad def mat_get_material(): return mat_state.material def", "arm.utils.safestr(texfile) s = tex['file'].rsplit('.', 1) if len(s) == 1: arm.log.warn(matname", "1167.0): i = 1 else: i = 0 r =", "def parse_group(node, socket): # Entering group index = socket_index(node, socket)", "blend == 'ADD': out_col = 'mix({0}, {0} + {1}, {2})'.format(col1,", "'_cols' curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const for i", "== 'REROUTE': return parse_displacement_input(l.from_node.inputs[0]) return parse_vector_input(inp) 
else: return None def", "return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col) else: return out_col elif node.type ==", "{1} : 0.5 / (1.0 - {1})))'.format(dotnv, blend) elif node.type", "scale = parse_value_input(node.inputs[4]) res = 'tex_brick({0} * {4}, {1}, {2},", "'On' else 'vec3(0.0)' elif socket == node.outputs[6]: # Angular Velocity", "{0}.z * {0}.z), 0.0)'.format(co) res = '(clamp({0}, 0.0, 1.0))'.format(f) if", "op == 'LOGARITHM': out_val = 'log({0})'.format(val1) elif op == 'SQRT':", "filepath, \"JPEG\") else: arm.log.warn(matname + '/' + image.name + '", "== lays[1].name: con.add_elem('tex1', 'short2norm') return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)'", "node.type == 'FRESNEL': curshader.add_function(c_functions.str_fresnel) ior = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv", "tex['file'].rsplit('.', 1) if len(s) == 1: arm.log.warn(matname + '/' +", "mat_bind_texture(tex) con.add_elem('tex', 'short2norm') curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link) if node.inputs[0].is_linked: uv_name =", "# out_val = 'round({0})'.format(val1) out_val = 'floor({0} + 0.5)'.format(val1) elif", "== 'GAMMA': out_col = parse_vector_input(node.inputs[0]) gamma = parse_value_input(node.inputs[1]) return 'pow({0},", "- texCoord1.y, 0.0)' return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif", "0.0)' return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)' elif node.type ==", "return '0.0' elif socket == node.outputs[10]: # Transmission Depth return", "def to_uniform(inp): uname = safesrc(inp.node.name) + safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type) + '", "== node.outputs[7]: # Ray Length return '0.0' elif socket ==", "'LINEAR_LIGHT': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert", "'vec3(0.0)' elif node.type == 'TANGENT': return 'wtangent' elif node.type ==", "val = parse_value_input(node.inputs[2]) fac = parse_value_input(node.inputs[3]) col = parse_vector_input(node.inputs[4]) return", 
"node.outputs[1]: # Normal return 'n' elif socket == node.outputs[2]: #", "tex_name, to_linear=False, tex_link=tex_link)) else: global parsed tex_store = store_var_name(node) #", "def store_var_name(node): return node_name(node.name) + '_store' def texture_store(node, tex, tex_name,", "node.outputs[7]: # Pointiness return '0.0' elif node.type == 'HAIR_INFO': #", "= node_name(node.name) + '_fac_inv' curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac))", "vec2) else: return '0.0' ## def vector_curve(name, fac, points): #", "return to_vec3(elems[0].color) # Write cols array cols_var = node_name(node.name) +", "2.0, {0}_fh2));'.format(sample_bump_res)) res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) *", "triplanar = node.projection == 'BOX' if triplanar: curshader.write(f'vec3 texCoordBlend =", "col2, fac_var) elif blend == 'LIGHTEN': out_col = 'max({0}, {1}", "'VECTOR': return '{0}.x'.format(res_var) else: # VALUE return res_var else: if", "1902.0): i = 3 elif(t >= 1449.0): i = 2", "import arm.make_state import arm.log import arm.material.mat_state as mat_state import arm.material.cycles_functions", "mat_users != None and mat in mat_users: mat_user = mat_users[mat][0]", "to_uniform(inp): uname = safesrc(inp.node.name) + safesrc(inp.name) curshader.add_uniform(glsl_type(inp.type) + ' '", "global particle_info global sample_bump if node.type == 'GROUP': if node.node_tree.name.startswith('Armory", "parse_value_input(node.inputs[3]) col = parse_vector_input(node.inputs[4]) return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col,", "to_linear: curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store)) return tex_store def write_bump(node, res,", "elif inp.type == 'RGBA': return parse_vector_input(inp) elif inp.type == 'VECTOR':", "+ '/' + image.name + ' - file extension required", "ext[0], ext[1], ext[2], ext[3])) curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2", "name + '_xs' curshader.write('float 
{0}[{1}];'.format(facs_var, len(points))) # TODO: Make const", "'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res) sample_bump_res = ''", "= node.inputs['Scale'].default_value rotation = node.inputs['Rotation'].default_value location = node.inputs['Location'].default_value if node.inputs['Location'].enabled", "{0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_TRANSPARENT': if parse_opacity: out_opacity = '(1.0", "name') return None ext = s[1].lower() do_convert = ext not", "or st == 'VECTOR': res = parse_vector(l.from_node, l.from_socket) if res", "'vposition' elif socket == node.outputs[5]: # Window return 'vec3(0.0)' #", "0.5 + {1} * 0.5)'.format(met1, met2) out_occlusion = '({0} *", "out_val = 'float({0} > {1})'.format(val1, val2) elif op == 'ROUND':", "== 'CEIL': out_val = 'ceil({0})'.format(val1) elif op == 'FRACT': out_val", "mix elif blend == 'HUE': out_col = 'mix({0}, {1}, {2})'.format(col1,", "if parse_surface: # Base color out_basecol = parse_vector_input(node.inputs[0]) # Occlusion", "out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_TRANSPARENT': if", "b = parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(r, g, b) elif", "elif node.type == 'VERTEX_COLOR': con.add_elem('col', 'short4norm') # Vcols only for", "parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_metallic =", "global con global vert global frag global geom global tesc", "return 'wtangent' elif node.type == 'TEX_COORD': #obj = node.object #instance", "socket == node.outputs[0]: return '{0}.r'.format(col) elif socket == node.outputs[1]: return", "converts .PNG to .jpg? 
Convert ext to lowercase on windows", "res = parse_value(l.from_node, l.from_socket) if res == None: return None", "{3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var) elif node.type == 'CURVE_VEC': #", "grad = node.gradient_type if grad == 'LINEAR': f = '{0}.x'.format(co)", "'RGB': if node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('vec3 {0}'.format(nn),", "# Get index fac_var = node_name(node.name) + '_fac' curshader.write('float {0}", "+ ({0} > {1} ? 1 : 0)'.format(fac_var, elems[i].position) #", "elif blend == 'ADD': out_col = 'mix({0}, {0} + {1},", "# Write index index_var = name + '_i' curshader.write('int {0}", "elif grad == 'RADIAL': f = 'atan({0}.y, {0}.x) / PI2", "link='$noise256.png') curshader.add_function(c_functions.str_tex_noise) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co =", "'.hdr')) if not has_ext: # Raw bytes, write converted .jpg", "= '1.0' elif node.type == 'VOLUME_ABSORPTION': pass elif node.type ==", "= '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link)) curshader.write_textures -= 1 return", "res = 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale) if sample_bump:", "'fresnel({0}, {1})'.format(ior, dotnv) elif node.type == 'NEW_GEOMETRY': if socket ==", "curshader.write(f'if (texCoordBlend.z > 0) {tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;')", "({1} - vec3(0.5))))'.format(col1, col2, fac_var) if node.use_clamp: return 'clamp({0}, vec3(0.0),", "# aniso_rot = parse_vector_input(node.inputs[9]) # sheen = parse_vector_input(node.inputs[10]) # sheen_tint", "texfilter == 'Anisotropic': interpolation = 'Smart' elif texfilter == 'Linear':", "out_val = '({0} - {1})'.format(val1, val2) elif op == 'MULTIPLY':", "' fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac' fac_inv_var", "elif grad == 'DIAGONAL': f = '({0}.x + {0}.y) *", "# Revert to mix elif blend == 'COLOR': out_col =", 
"{2}, {3}))'.format(out, scale[0], scale[1], scale[2]) if rotation[2] != 0.0: #", "# Parametric return 'mposition' elif node.type == 'HAIR_INFO': return 'vec3(0.0)'", "if parse_opacity: frag.write('opacity = {0} - 0.0002;'.format(out_opacity)) # Volume #", "'3b', vec + '.y', curves[3].points), vector_curve(name + '3c', vec +", "elif op == 'SQRT': out_val = 'sqrt({0})'.format(val1) elif op ==", "for now return 'vcolor' else: # Vector con.add_elem('tex', 'short2norm') #", "# Constant, linear, quadratic # Shaders default to quadratic for", "'/' + image.name + ' - invalid file path') return", "False def to_vec1(v): return str(v) def to_vec3(v): return 'vec3({0}, {1},", "parse_shader_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type == 'REROUTE':", "fac_var, fac_inv_var) elif node.type == 'ADD_SHADER': bc1, rough1, met1, occ1,", "particle_info global sample_bump global sample_bump_res # RGB if node.type ==", "* sin(theta) # x * sin(theta) + y * cos(theta)", "0.5 / (1.0 - {1})))'.format(dotnv, blend) elif node.type == 'LIGHT_PATH':", "{1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl)) sample_bump", "== 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'ATTRIBUTE': #", "reroute return to_vec3([0.0, 0.0, 0.0]) else: if mat_batch() and inp.is_uniform:", "else: co = 'bposition' col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2])", "color for missing texture curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0,", "#node.use_pixel_size # size = parse_value_input(node.inputs[0]) return '0.0' elif node.type ==", "filepath = './' + image.name has_ext = filepath.endswith(('.jpg', '.png', '.hdr'))", "elif socket == node.outputs[1]: # Facing return '(1.0 - pow({0},", "1 return '{0}.rgb'.format(tex_store) elif node.type == 'TEX_MAGIC': curshader.add_function(c_functions.str_tex_magic) if node.inputs[0].is_linked:", "'{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, 
tex_link=tex_link)) curshader.write_textures -= 1 return res", "exist yet if image.packed_file is not None: if not os.path.isfile(unpack_filepath)", "'On' else '0.0' elif socket == node.outputs[2]: # Lifetime particle_info['lifetime']", "+ g[2] rgb[2] = ((b[0] * t + b[1]) *", "for Curve lays = mat_user.data.uv_layers # Second uvmap referenced if", "con.add_elem('tex', 'short2norm') curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link) if node.inputs[0].is_linked: uv_name = parse_vector_input(node.inputs[0])", "if matname is None: matname = mat_state.material.name if image is", "{2})'.format(col1, col2, fac_var) elif blend == 'SUBTRACT': out_col = 'mix({0},", "= parent.inputs[index] res = parse_input(inp) parents.append(parent) # Return to group", "+ '_' + safesrc(socket.name) + '_res' def write_result(l): global parsed", "= True return 'p_age' if arm.utils.get_rp().arm_particles == 'On' else '0.0'", "{3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var,", "None: curshader.write('n = {0};'.format(normal_res)) def is_parsed(s): global parsed return s", "_con vert = _vert frag = _frag geom = _geom", "Displacement if socket == node.outputs[1]: return parse_value_input(node.inputs[7]) else: return None", "in parsed def res_var_name(node, socket): return node_name(node.name) + '_' +", "op == 'DOT_PRODUCT': return 'dot({0}, {1})'.format(vec1, vec2) else: return '0.0'", "= 'n' return res elif node.type == 'MAPPING': out =", "Write bytes if size is different or file does not", "'({0} * {3} + {1} * {2})'.format(spec1, spec2, fac_var, fac_inv_var)", "parse_value_input(node.inputs[3]) res = 'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2, scale)", "t + b[2]) * t + b[3] # Pass constant", "'LESS_THAN': out_val = 'float({0} < {1})'.format(val1, val2) elif op ==", "# TODO: Make const for i in range(0, len(points)): curshader.write('{0}[{1}]", "elif node.type == 'MATH': val1 = parse_value_input(node.inputs[0]) val2 = 
parse_value_input(node.inputs[1])", "blend == 'SATURATION': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)", "+ '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec", "# Color con.add_elem('col', 'short4norm') # Vcols only for now return", "* {2})'.format(met1, met2, fac_var, fac_inv_var) out_occlusion = '({0} * {3}", "parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = '1.0' out_metallic =", "elif socket == node.outputs[10]: # Transmission Depth return '0.0' elif", "parse_group(node, socket) elif node.type == 'GROUP_INPUT': return parse_group_input(node, socket) elif", "= vec2(0.0);') # Temp curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0,", "== socket: return i def node_name(s): for p in parents:", "== 'RGBTOBW': col = parse_vector_input(node.inputs[0]) return '((({0}.r * 0.3 +", "mix elif blend == 'BURN': out_col = 'mix({0}, {1}, {2})'.format(col1,", "curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom') return 'objectInfoRandom' elif node.type == 'PARTICLE_INFO': if", "write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness = parse_value_input(node.inputs[1]) out_specular = '0.0'", "'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co) elif grad == 'QUADRATIC_SPHERE':", "nor = parse_vector_input(node.inputs[3]) if sample_bump_res != '': if node.invert: ext", "node.type == 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'VERTEX_COLOR':", "if len(lays) > 1 and node.uv_map == lays[1].name: con.add_elem('tex1', 'short2norm')", "textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name)) sample_bump = False if", "RGB Curves fac = parse_value_input(node.inputs[0]) vec = parse_vector_input(node.inputs[1]) curves =", "def is_parsed(s): global parsed return s in parsed def res_var_name(node,", "TODO: delete cache when file changes if not os.path.isfile(converted_path): fmt", "co = 
parse_vector_input(node.inputs[0]) else: co = 'bposition' grad = node.gradient_type", "= node.color_ramp.elements if len(elems) == 1: return to_vec3(elems[0].color) # Write", "* {2}, {0}.y * {2} + {0}.z * {1}, 0.0)'.format(out,", "= mat_state.material.name if image is None: return None # Get", "Compute nodes only once global parents global normal_parsed global curshader", "1 def parse_value_input(inp): if inp.is_linked: l = inp.links[0] if l.from_node.type", "node.outputs[0]: return '{0}.x'.format(vec) elif socket == node.outputs[1]: return '{0}.y'.format(vec) elif", "node.coloring == 'INTENSITY': res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale) else:", "= 'Closest' # TODO: Blender seems to load full images", "= node.rotation[1] # out = 'vec3({0}.x * {1} - {0}.z", "= {1};'.format(fac_var, fac)) col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) blend", "= {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))", "= node.operation if op == 'ADD': return '({0} + {1})'.format(vec1,", "texCoordBlend.y;') curshader.write(f'if (texCoordBlend.z > 0) {tex_store} += texture({tex_name}, {uv_name}2.xy) *", "wl = parse_value_input(node.inputs[0]) # Roughly map to cycles - 450", "'COMBXYZ': x = parse_value_input(node.inputs[0]) y = parse_value_input(node.inputs[1]) z = parse_value_input(node.inputs[2])", "scl)) curshader.write('float {0}_3 = {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre,", "transmission_roughness = parse_vector_input(node.inputs[16]) if node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0: out_emission", "array cols_var = node_name(node.name) + '_cols' curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) #", "Extend or clip tex['u_addressing'] = 'clamp' tex['v_addressing'] = 'clamp' if", "'' if node.inputs[0].is_linked else 'const ' fac = parse_value_input(node.inputs[0]) fac_var", "* t + r[2] rgb[1] = g[0] * t_inv +", "== node.outputs[0]: return 
to_vec3(node.outputs[0].default_value) elif socket == node.outputs[1]: # TODO:", "out_emission = '1.0' emission_found = True emission_strength = parse_value_input(node.inputs[1]) out_basecol", "# Material Index curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex') return 'objectInfoMaterialIndex' elif socket", "node.type == 'VECT_TRANSFORM': #type = node.vector_type #conv_from = node.convert_from #conv_to", "node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name)) return", "+= '_texread' s = safesrc(s) if '__' in s: #", "res) return res elif node.type == 'TEX_ENVIRONMENT': # Pass through", "l = inp.links[0] if l.from_node.type == 'REROUTE': return parse_shader_input(l.from_node.inputs[0]) return", "emi1 = parse_shader_input(node.inputs[0]) bc2, rough2, met2, occ2, spec2, opac2, emi2", "out_occlusion, out_specular, out_opacity, out_emission def parse_shader(node, socket): global emission_found out_basecol", "'objectInfoIndex' elif socket == node.outputs[3]: # Material Index curshader.add_uniform('float objectInfoMaterialIndex',", "parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_HAIR':", "elif grad == 'SPHERICAL': f = 'max(1.0 - sqrt({0}.x *", "opac1, emi1 = parse_shader_input(node.inputs[0]) bc2, rough2, met2, occ2, spec2, opac2,", "fac_inv_var) out_emission = '({0} * {3} + {1} * {2})'.format(emi1,", "'NORMALIZE': return 'normalize({0})'.format(vec1) elif node.type == 'DISPLACEMENT': height = parse_value_input(node.inputs[0])", "hasattr(mat_user.data, 'uv_layers'): # No uvlayers for Curve lays = mat_user.data.uv_layers", "1 return res elif node.image == None: # Empty texture", "else: # CELLS res = 'tex_voronoi({0} * {1}).r'.format(co, scale) if", "op == 'LESS_THAN': out_val = 'float({0} < {1})'.format(val1, val2) elif", "{0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) return 
'{0}.a'.format(tex_store) elif node.type", "else: curshader.add_uniform('vec3 eye', link='_cameraPosition') return 'distance(eye, wposition)' elif node.type ==", "* 0.11) / 3.0) * 2.5)'.format(col) elif node.type == 'SEPHSV':", "file changes if not os.path.isfile(converted_path): fmt = 'PNG' if new_ext", "elif node.type == 'BSDF_DIFFUSE': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0])", "if is_parsed(store_var_name(node)): return '{0}.rgb'.format(store_var_name(node)) tex_name = node_name(node.name) tex = make_texture(node,", "None # Get filepath filepath = image.filepath if filepath ==", "{0} / {1}))'.format(col1, col2, fac_var) elif blend == 'DIFFERENCE': out_col", "image.source == 'MOVIE': tex['source'] = 'movie' tex['min_filter'] = 'linear' tex['mag_filter']", "'Point': interpolation = 'Closest' # TODO: Blender seems to load", "== node.outputs[0]: return '{0}.r'.format(col) elif socket == node.outputs[1]: return '{0}.g'.format(col)", "parse_value_input(node.inputs[0]) sat = parse_value_input(node.inputs[1]) val = parse_value_input(node.inputs[2]) fac = parse_value_input(node.inputs[3])", "+ {1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var) elif node.type ==", "+ image.name + ' - file not found(' + filepath", "= parse_vector_input(node.inputs[10]) # sheen_tint = parse_vector_input(node.inputs[11]) # clearcoat = parse_vector_input(node.inputs[12])", "= parse_value_input(node.inputs[7]) # aniso = parse_vector_input(node.inputs[8]) # aniso_rot = parse_vector_input(node.inputs[9])", "{1}, {2})'.format(col1, col2, fac_var) elif blend == 'MULTIPLY': out_col =", "node.outputs[4]: # Is Singular Ray return '0.0' elif socket ==", "elif node.type == 'MAPPING': out = parse_vector_input(node.inputs[0]) scale = node.inputs['Scale'].default_value", "global geom global tesc global tese global parse_surface global parse_opacity", "* {1}, {2})'.format(col1, col2, fac_var) elif blend == 'SUBTRACT': out_col", "directly') 
parse_normal_map_color_input(node.inputs[5]) # Emission if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0:", "blackbody_table_r[i] g = blackbody_table_g[i] b = blackbody_table_b[i] t_inv = 1.0", "+ ' - file not found(' + filepath + ')')", "Shaders default to quadratic for now return '1.0' elif node.type", "== None: # Empty texture tex = {} tex['name'] =", "vec3({4}, {5}, {6})) * {3})'.format(\\ vector_curve(name + '0', vec +", "out_roughness = parse_value_input(node.inputs[3]) # Metallic out_metallic = parse_value_input(node.inputs[4]) # Normal", "{0} * {1}, {2})'.format(col1, col2, fac_var) elif blend == 'SUBTRACT':", "{2}.xy);'.format(tex_store, tex_name, uv_name)) if sample_bump: sample_bump_res = tex_store curshader.write('float {0}_1", "+ {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) if location[0] !=", "== node.outputs[4]: # Is Singular Ray return '0.0' elif socket", "parse_value_input(node.inputs[3]) res = 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale) if", "{2})'.format(col1, col2, fac_var) elif blend == 'LIGHTEN': out_col = 'max({0},", "tex_name, to_linear=False, tex_link=None): global sample_bump global sample_bump_res global parsed tex_store", "return ((num & (num - 1)) == 0) and num", "# Write Ys array ys_var = name + '_ys' curshader.write('float", "tex_name = node_name(node.name) tex = make_texture(node, tex_name) tex_link = node.name", "curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1])) # Get index fac_var =", "elif node.type == 'VOLUME_SCATTER': pass return out_basecol, out_roughness, out_metallic, out_occlusion,", "= parse_value_input(node.inputs[1]) res = 'tex_wave_f({0} * {1})'.format(co, scale) if sample_bump:", "is None: matname = mat_state.material.name if image is None: return", "PBR, connect Image Texture directly') parse_normal_map_color_input(node.inputs[5]) # Emission if node.inputs[6].is_linked", "node.outputs[0]: # Color con.add_elem('col', 'short4norm') # Vcols only for 
now", "if curshader.shader_type == 'frag' else 'wnormal' elif socket == node.outputs[2]:", "parse_value_input(node.inputs[1]) op = node.operation if op == 'ADD': out_val =", "float f = (pos - start) * (1.0 / (finish", "parse_opacity global basecol_only global emission_found global particle_info global sample_bump global", "curshader == tese: return parse_vector_input(node.inputs[1]) else: #space = node.space #map", "= '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_HAIR': pass elif", "clearcoar_normal = parse_vector_input(node.inputs[20]) # tangent = parse_vector_input(node.inputs[21]) if parse_opacity: if", "converted .jpg to /unpacked filepath += '.raw' elif image.source ==", "# Window return 'vec3(0.0)' # 'wvpposition' elif socket == node.outputs[6]:", "res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link)) curshader.write_textures -= 1 return", "sample_bump_res = store_var_name(node) + '_bump' # Testing.. get function parts..", "== 'ARCCOSINE': out_val = 'acos({0})'.format(val1) elif op == 'ARCTANGENT': out_val", "out_specular = parse_value_input(node.inputs[5]) # specular_tint = parse_vector_input(node.inputs[6]) out_roughness = parse_value_input(node.inputs[7])", "if len(elems) == 1: return to_vec3(elems[0].color) # Write cols array", "if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): # Displacement if", "{0} = {1};'.format(index_var, index)) # Linear # Write Xs array", "= {1};'.format(fac_var, fac)) index = '0' for i in range(1,", "for i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0]))", "emi2) if parse_opacity: out_opacity = '({0} * 0.5 + {1}", "= 'tex_wave_f({0} * {1})'.format(co, scale) if sample_bump: write_bump(node, res) return", "node_name(node.name) + '_fac' curshader.write('float {0} = {1};'.format(fac_var, fac)) col1 =", "gamma = parse_value_input(node.inputs[1]) return 'pow({0}, vec3({1}))'.format(out_col, gamma) 
elif node.type ==", "vec3(1.0) - ({0}), {1})'.format(out_col, fac) elif node.type == 'MIX_RGB': fac", "inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT': normal_res = parse_vector_input(inp) if normal_res", "out_basecol = parse_vector_input(node.inputs[0]) # subsurface = parse_vector_input(node.inputs[1]) # subsurface_radius =", "# Facing return '(1.0 - pow({0}, ({1} < 0.5) ?", "{1};'.format(fac_var, fac)) index = '0' for i in range(1, len(elems)):", "+ {1})'.format(vec1, vec2) elif op == 'SUBTRACT': return '({0} -", "normalize(TBN * n);') con.add_elem('tang', 'short4norm') frag.write_normal -= 1 def parse_value_input(inp):", "express or implied. # See the License for the specific", "t == 'RGBA' or t == 'VECTOR': return 'vec3' else:", "== 'HOLDOUT': if parse_surface: # Occlude out_occlusion = '0.0' elif", "image.packed_file is not None: filepath = './' + image.name has_ext", "import arm.material.cycles_functions as c_functions import shutil emission_found = False particle_info", "op == 'AVERAGE': return '(({0} + {1}) / 2.0)'.format(vec1, vec2)", "'VALUE': if node.arm_material_param: nn = 'param_' + node_name(node.name) curshader.add_uniform('float {0}'.format(nn),", "+ y * cos(theta) out = 'vec3({0}.x * {1} -", "node.type == 'BSDF_ANISOTROPIC': if parse_surface: write_normal(node.inputs[4]) # Revert to glossy", "parse_shader_input(l.from_node.inputs[0]) return parse_shader(l.from_node, l.from_socket) else: out_basecol = 'vec3(0.8)' out_roughness =", "= '(vec3(1.0) - (vec3(1.0 - {2}) + {2} * (vec3(1.0)", "return res elif node.type == 'TEX_GRADIENT': if node.inputs[0].is_linked: co =", "== 'VOLUME_SCATTER': pass return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity,", "i in range(0, len(points)): curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1])) #", "150.0)'.format(wl) # Vector elif node.type == 'CAMERA': # View Vector", "filepath + ')') return None if do_convert: unpack_path = 
os.path.join(arm.utils.get_fp_build(),", "Diffuse Ray return '1.0' elif socket == node.outputs[3]: # Is", "constant return to_vec3([rgb[0], rgb[1], rgb[2]]) elif node.type == 'VALTORGB': #", "parsed def res_var_name(node, socket): return node_name(node.name) + '_' + safesrc(socket.name)", "Z axis for now.. a = rotation[2] # x *", "= parse_vector_input(node.inputs[0]) out_roughness = '1.0' out_metallic = '1.0' elif node.type", "# Entering group index = socket_index(node, socket) output_node = node_by_type(node.node_tree.nodes,", "3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength)) curshader.write('vec3 {0}_a =", "== 'LIGHT_PATH': if socket == node.outputs[0]: # Is Camera Ray", "frag.write('vec3 n = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp))) if strength_input", "node.use_clamp: return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col) else: return out_col elif node.type", "not found(' + filepath + ')') return None if do_convert:", "fetched if is_parsed(store_var_name(node)): return '{0}.rgb'.format(store_var_name(node)) tex_name = node_name(node.name) tex =", "'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2, scale) if sample_bump: write_bump(node,", "hue, sat, val, fac) elif node.type == 'INVERT': fac =", "= node.convert_from #conv_to = node.convert_to # Pass throuh return parse_vector_input(node.inputs[0])", "b[1]) * t + b[2]) * t + b[3] #", "= {0};'.format(out_specular)) if '_Emission' in wrd.world_defs: frag.write('emission = {0};'.format(out_emission)) if", "node.outputs[1]: # Normal return 'n' if curshader.shader_type == 'frag' else", "global parsed # Compute nodes only once global parents global", "'RGBA' or st == 'VECTOR': res = parse_vector(l.from_node, l.from_socket) if", "True frag.write_normal += 1 if not get_arm_export_tangents() or mat_get_material().arm_decal: #", "parse_vector_input(node.inputs[12]) # clearcoat_rough = parse_vector_input(node.inputs[13]) # ior = parse_vector_input(node.inputs[14]) #", 
"parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) scale = parse_value_input(node.inputs[3]) res = 'tex_checker({0},", "= '({0} * {3} + {1} * {2})'.format(spec1, spec2, fac_var,", "!= image.packed_file.size: with open(unpack_filepath, 'wb') as f: f.write(image.packed_file.data) # Copy", "sample_bump_res = '' else: res = 'n' return res elif", "parse_vector_input(node.inputs[0]) return '((({0}.r * 0.3 + {0}.g * 0.59 +", "res = 'tex_checker_f({0}, {1})'.format(co, scale) if sample_bump: write_bump(node, res) return", "== 'TEX_IMAGE': # Already fetched if is_parsed(store_var_name(node)): return '{0}.a'.format(store_var_name(node)) tex_name", "' - invalid file path') return None # Reference image", "'p_index' if arm.utils.get_rp().arm_particles == 'On' else '0.0' elif socket ==", "'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale) else: # CELLS res = 'tex_voronoi({0}", "parse(nodes, con, vert, frag, geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True,", "fac, points): # Write Ys array ys_var = name +", "+ '.x', curves[3].points), vector_curve(name + '3b', vec + '.y', curves[3].points),", "= '1.0' out_specular = '1.0' out_opacity = '1.0' out_emission =", "* {1} - {0}.z * {2}, {0}.y * {2} +", "'DIVIDE': out_col = '(vec3((1.0 - {2}) * {0} + {2}", "+ '2', vec + '.z', curves[2].points), fac) elif node.type ==", "< 0.5) ? 
2.0 * {1} : 0.5 / (1.0", "= image.filepath if filepath == '': if image.packed_file is not", "0.5 + {1} * 0.5)'.format(occ1, occ2) out_specular = '({0} *", "= {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))", "len(elems)): curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position)) # Mix color #", "res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale) else: # CELLS res", "noise curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else: co =", "tex['source'] = 'movie' tex['min_filter'] = 'linear' tex['mag_filter'] = 'linear' tex['mipmap_filter']", "defaults to linear if image_node.extension != 'REPEAT': # Extend or", "1.0 or scale[2] != 1.0: out = '({0} * vec3({1},", "elif l.from_node.type == 'NORMAL_MAP': return None return res_var def glsl_type(t):", "pre = ar[0] + '(' if ',' in ar[1]: ar2", "= tex_name tex['file'] = '' return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False,", "= 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1]) return out elif", "# Metallic out_metallic = parse_value_input(node.inputs[4]) # Normal if node.inputs[5].is_linked and", "= ar[1][:-1] post = ')' curshader.write('float {0}_1 = {1}{2} +", "output_node != None: parse_output(output_node, con, vert, frag, geom, tesc, tese,", "+ {1} * 0.5)'.format(occ1, occ2) out_specular = '({0} * 0.5", "else: return '0.0' elif node.type == 'CAMERA': # View Z", "normal_parsed: return normal_parsed = True frag.write_normal += 1 if not", "== node.outputs[0]: # Position return 'wposition' elif socket == node.outputs[1]:", "Convert ext to lowercase on windows if arm.utils.get_os() == 'win':", "ext not in ('jpg', 'png', 'hdr', 'mp4') # Convert image", "True st = l.from_socket.type if st == 'RGB' or st", "{0}.r)'.format(parse_vector_input(node.inputs[0])) elif node.type == 'BSDF_VELVET': if parse_surface: write_normal(node.inputs[2]) out_basecol =", 
"curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex') return 'objectInfoIndex' elif socket == node.outputs[3]: #", "global frag global geom global tesc global tese global parse_surface", "0.0)'.format(co) res = '(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res)", "not os.path.isfile(unpack_filepath): fmt = 'PNG' if new_ext == 'png' else", "[-1.22075471e+03, 2.56245413e-05, 1.20753416e+00], [-1.42546105e+03, -4.01730887e-05, 1.44002695e+00], [-1.18134453e+03, -2.18913373e-05, 1.30656109e+00], [-5.00279505e+02,", "or location[2] != 0.0: out = '({0} + vec3({1}, {2},", "# UV con.add_elem('tex', 'short2norm') return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'", "interpolation = 'Smart' elif texfilter == 'Linear': interpolation = 'Linear'", "socket == node.outputs[2]: return '{0}.b'.format(col) elif node.type == 'SEPXYZ': vec", "sample_bump: write_bump(node, res) return res elif node.type == 'TEX_NOISE': curshader.add_function(c_functions.str_tex_noise)", "'({0} - {1})'.format(val1, val2) elif op == 'MULTIPLY': out_val =", "_parse_opacity, _parse_displacement, _basecol_only): global parsed # Compute nodes only once", "# tangent = parse_vector_input(node.inputs[21]) if parse_opacity: if len(node.inputs) > 20:", "socket == node.outputs[5]: # Velocity particle_info['velocity'] = True return 'p_velocity'", "node_by_type(nodes, 'OUTPUT_MATERIAL') if output_node != None: parse_output(output_node, con, vert, frag,", "== 'BSDF_REFRACTION': # write_normal(node.inputs[3]) pass elif node.type == 'SUBSURFACE_SCATTERING': if", "sat = parse_value_input(node.inputs[1]) val = parse_value_input(node.inputs[2]) fac = parse_value_input(node.inputs[3]) col", "tex_name) tex_link = node.name if node.arm_material_param else None if tex", "arm.utils.extract_filename(filepath) tex['file'] = arm.utils.safestr(texfile) s = tex['file'].rsplit('.', 1) if len(s)", "return '(({0} + {1}) / 2.0)'.format(vec1, vec2) elif op ==", "= vec2(0.0); vec2 {uv_name}2 = 
vec2(0.0);') # Temp curshader.write(f'vec4 {tex_store}", "elif node.type == 'MIX_RGB': fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name)", "node.outputs[1]: curshader.add_include('std/math.glsl') curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj') return 'linearize(gl_FragCoord.z, cameraProj)' # View", "vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store)) return '{0}.a'.format(tex_store) elif node.type == 'TEX_MAGIC':", "* texCoordBlend.y;') curshader.write(f'if (texCoordBlend.z > 0) {tex_store} += texture({tex_name}, {uv_name}2.xy)", "Volume # parse_volume_input(node.inputs[1]) # Displacement if _parse_displacement and disp_enabled() and", "link=tex_link) if node.inputs[0].is_linked: uv_name = parse_vector_input(node.inputs[0]) uv_name = 'vec2({0}.x, 1.0", "= '(clamp({0}, 0.0, 1.0))'.format(f) if sample_bump: write_bump(node, res) return res", "= '({0} + vec3({1}, {2}, {3}))'.format(out, location[0], location[1], location[2]) #", "texCoord);') frag.write('n = TBN * normalize(texn);') else: frag.write('vec3 n =", "parse_surface: # Single channel out_occlusion = parse_vector_input(node.inputs[0]) + '.r' elif", "is not None: if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size:", "is None: return None # Get filepath filepath = image.filepath", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "global normal_parsed global curshader # Active shader - frag for", "* t_inv + r[1] * t + r[2] rgb[1] =", "{0};'.format(out_specular)) if '_Emission' in wrd.world_defs: frag.write('emission = {0};'.format(out_emission)) if parse_opacity:", "node.rotation[1] # out = 'vec3({0}.x * {1} - {0}.z *", "curves[3].points), vector_curve(name + '3b', vec + '.y', curves[3].points), vector_curve(name +", "# Linear # Write Xs array facs_var = name +", "elif socket == node.outputs[5]: # Is Reflection Ray return '0.0'", "'Cubic': # Mipmap linear tex['mipmap_filter'] = 'linear' 
tex['generate_mipmaps'] = True", "frag, geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False): output_node =", "== node.outputs[8]: # Ray Depth return '0.0' elif socket ==", "'dot({0}, {1})'.format(vec1, vec2) else: return '0.0' ## def vector_curve(name, fac,", "parse_value(l.from_node, l.from_socket) if res == None: return None curshader.write('float {0}", "elif op == 'POWER': out_val = 'pow({0}, {1})'.format(val1, val2) elif", "res = 'n' return res elif node.type == 'MAPPING': out", "['2', '1', '4', '3'] curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2};", "if parse_surface or parse_opacity: parsed = {} parents = []", "if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)' elif socket == node.outputs[6]:", "'bposition' col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) scale = parse_value_input(node.inputs[3])", "if arm.utils.get_os() == 'win': s = filepath.rsplit('.', 1) arm.assets.add(arm.utils.asset_path(s[0] +", "curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res)) curshader.write('vec3 {0}_b = normalize(vec3(0.0,", "Version 2.0 (the \"License\"); # you may not use this", "'3'] curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2}; float {0}_fh2 =", "fac_var) # Revert to mix elif blend == 'VALUE': out_col", "col1 = parse_vector_input(node.inputs[1]) col2 = parse_vector_input(node.inputs[2]) blend = node.blend_type if", "if parse_opacity: out_opacity = parse_value_input(node.inputs[1]) else: return parse_group(node, socket) elif", "node.type == 'TEX_MUSGRAVE': curshader.add_function(c_functions.str_tex_musgrave) if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else:", "return arm.utils.get_rp().rp_renderer def get_arm_export_tangents(): return bpy.data.worlds['Arm'].arm_export_tangents def safesrc(name): return arm.utils.safesrc(name)", "mix elif blend == 'COLOR': out_col = 'mix({0}, {1}, {2})'.format(col1,", 
"parse_value_input(node.inputs[0]) midlevel = parse_value_input(node.inputs[1]) scale = parse_value_input(node.inputs[2]) nor = parse_vector_input(node.inputs[3])", "objectInfoIndex', link='_objectInfoIndex') return 'objectInfoIndex' elif socket == node.outputs[3]: # Material", "'' wrd = bpy.data.worlds['Arm'] # Surface if parse_surface or parse_opacity:", "Extract packed data / copy non-ascii texture unpack_path = os.path.join(arm.utils.get_fp_build(),", "out_roughness = parse_value_input(node.inputs[1]) out_specular = '0.0' elif node.type == 'BSDF_GLOSSY':", "+ '_i' curshader.write('int {0} = {1};'.format(index_var, index)) if interp ==", "'_' + safesrc(socket.name) + '_res' def write_result(l): global parsed res_var", "node.operation if op == 'ADD': return '({0} + {1})'.format(vec1, vec2)", "node.outputs[6]: # Is Transmission Ray return '0.0' elif socket ==", "'Assets', 'unpacked') if not os.path.exists(unpack_path): os.makedirs(unpack_path) converted_path = os.path.join(unpack_path, tex['file'])", "Foundation # # Licensed under the Apache License, Version 2.0", "= textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name)) curshader.write('float {0}_2 =", "+ 0.5)'.format(val1) elif op == 'FLOOR': out_val = 'floor({0})'.format(val1) elif", "post, scl)) curshader.write('float {0}_4 = {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res,", "assets_add_embedded_data('noise256.png') curshader.add_uniform('sampler2D snoise256', link='$noise256.png') if node.inputs[0].is_linked: co = parse_vector_input(node.inputs[0]) else:", "'const ' fac = parse_value_input(node.inputs[0]) fac_var = node_name(node.name) + '_fac'", "if output_node != None: parse_output(output_node, con, vert, frag, geom, tesc,", "bytes if size is different or file does not exist", "((num & (num - 1)) == 0) and num !=", "# Position return 'wposition' elif socket == node.outputs[1]: # Normal", "is_parsed(store_var_name(node)): return '{0}.a'.format(store_var_name(node)) 
tex_name = safesrc(node.name) tex = make_texture(node, tex_name)", "curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2])) #", "True height = parse_value_input(node.inputs[2]) sample_bump = False nor = parse_vector_input(node.inputs[3])", "Write Ys array ys_var = name + '_ys' curshader.write('float {0}[{1}];'.format(ys_var,", "f.write(image.packed_file.data) # Copy non-ascii texture else: if not os.path.isfile(unpack_filepath) or", "== 1: arm.log.warn(matname + '/' + image.name + ' -", "if '_Emission' in wrd.world_defs: frag.write('emission = {0};'.format(out_emission)) if parse_opacity: frag.write('opacity", "elems[i].color[0], elems[i].color[1], elems[i].color[2])) # Get index fac_var = node_name(node.name) +", "1.29189794e-04, 9.08181524e-01], [-1.22075471e+03, 2.56245413e-05, 1.20753416e+00], [-1.42546105e+03, -4.01730887e-05, 1.44002695e+00], [-1.18134453e+03, -2.18913373e-05,", "parse_value_input(node.inputs[2]) return 'vec3({0}, {1}, {2})'.format(x, y, z) elif node.type ==", "== 'INVERT': fac = parse_value_input(node.inputs[0]) out_col = parse_vector_input(node.inputs[1]) return 'mix({0},", "to quadratic for now return '1.0' elif node.type == 'NORMAL':", "{0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a)) if location[0] != 0.0", "if node.node_tree.name.startswith('Armory PBR'): if parse_surface: # Base color out_basecol =", "'SEPHSV': return '0.0' elif node.type == 'SEPRGB': col = parse_vector_input(node.inputs[0])", "1.64843306e+00], [4.10671449e+03, -8.61949938e-05, 6.41423749e-01], [4.66849800e+03, 2.85655028e-05, 1.29075375e-01], [4.60124770e+03, 2.89727618e-05, 1.48001316e-01],", "node_name(node.name) curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name)) return nn else: return to_vec3(socket.default_value) elif", "l.from_node.type == 'NORMAL_MAP': return None return res_var def glsl_type(t): if", "uv_name)) curshader.write('float {0}_4 = 
textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name))", "curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac,\\ vector_curve(name", "+ '/' + image.name + ' - file not found('", "n def socket_index(node, socket): for i in range(0, len(node.outputs)): if", "out_occlusion = parse_vector_input(node.inputs[0]) + '.r' elif node.type == 'BSDF_ANISOTROPIC': if", "node.outputs[0]: return '{0}.r'.format(col) elif socket == node.outputs[1]: return '{0}.g'.format(col) elif", "2.0 - 1.0;'.format(parse_vector_input(inp))) frag.write('texn.y = -texn.y;') frag.add_include('std/normals.glsl') frag.write('mat3 TBN =", "'time' else: return '0.0' elif node.type == 'CAMERA': # View", "'({0}.x)'.format(parse_vector_input(node.inputs[17])) emission_found = True # clearcoar_normal = parse_vector_input(node.inputs[20]) # tangent", "= 'tan({0})'.format(val1) elif op == 'ARCSINE': out_val = 'asin({0})'.format(val1) elif", "'({0} - {1})'.format(vec1, vec2) elif op == 'AVERAGE': return '(({0}", "frag.write_normal -= 1 def parse_value_input(inp): if inp.is_linked: l = inp.links[0]", "emission_found out_basecol = 'vec3(0.8)' out_roughness = '0.0' out_metallic = '0.0'", "particle_info global sample_bump if node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'):", "through return to_vec3([0.0, 0.0, 0.0]) elif node.type == 'TEX_VORONOI': curshader.add_function(c_functions.str_tex_voronoi)", "and inp.is_uniform: return to_uniform(inp) else: return to_vec3(inp.default_value) def parse_vector(node, socket):", "* {1}), tex_noise({0} * {1} + 0.33), tex_noise({0} * {1}", "else: return '0.0' ## def vector_curve(name, fac, points): # Write", "= parse_vector_input(node.inputs[6]) out_roughness = parse_value_input(node.inputs[7]) # aniso = parse_vector_input(node.inputs[8]) #", "return tex_store def write_bump(node, res, scl=0.001): global sample_bump global sample_bump_res", "else: return parse_group(node, socket) elif node.type == 
'GROUP_INPUT': return parse_group_input(node,", "out_val = 'tan({0})'.format(val1) elif op == 'ARCSINE': out_val = 'asin({0})'.format(val1)", "scale = node.inputs['Scale'].default_value rotation = node.inputs['Rotation'].default_value location = node.inputs['Location'].default_value if", "'{0}.x'.format(res_var) else: # VALUE return res_var else: if mat_batch() and", "return '0.0' elif socket == node.outputs[8]: # Ray Depth return", "node.outputs[4]: # Incoming return 'vVec' elif socket == node.outputs[5]: #", "texCoordBlend = vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2 =", "elif node.type == 'LAYER_WEIGHT': blend = parse_value_input(node.inputs[0]) if node.inputs[1].is_linked: dotnv", "socket == node.outputs[3]: # Location particle_info['location'] = True return 'p_location'", "'short4norm') # Vcols only for now return 'vcolor' else: #", "+ '.' + s[1].lower())) else: arm.assets.add(arm.utils.asset_path(filepath)) # if image_format !=", "{2})'.format(col1, col2, fac_var) # Revert to mix elif blend ==", "particle_info['lifetime'] = True return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else", "not None: if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size: with", "= parse_value_input(node.inputs[1]) if parse_opacity: out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0])) elif", "out_basecol = parse_vector_input(node.inputs[0]) elif node.type == 'BSDF_TOON': # write_normal(node.inputs[3]) pass", "delete cache when file changes if not os.path.isfile(converted_path): fmt =", "+ {1} * 0.5)'.format(spec1, spec2) out_emission = '({0} * 0.5", "tese global parse_surface global parse_opacity global basecol_only global emission_found global", "'_fac' fac_inv_var = node_name(node.name) + '_fac_inv' curshader.write('{0}float {1} = {2};'.format(prefix,", "= 'min({0}, {1})'.format(val1, val2) elif op == 'MAXIMUM': out_val =", "vec3({1}, {2}, {3}))'.format(out, location[0], location[1], 
location[2]) # use Extension parameter", "= 'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2, scale) if sample_bump:", "node.type == 'GROUP_INPUT': return parse_group_input(node, socket) elif node.type == 'ATTRIBUTE':", "return 'clamp({0}, 0.0, 1.0)'.format(out_val) else: return out_val elif node.type ==", "= False def to_vec1(v): return str(v) def to_vec3(v): return 'vec3({0},", "if parse_surface: # Single channel out_occlusion = parse_vector_input(node.inputs[0]) + '.r'", "parse_vector_input(node.inputs[8]) # aniso_rot = parse_vector_input(node.inputs[9]) # sheen = parse_vector_input(node.inputs[10]) #", "'float({0} % {1})'.format(val1, val2) out_val = 'mod({0}, {1})'.format(val1, val2) elif", "+ {2} * (2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var)", "return '1.0' elif socket == node.outputs[4]: # Is Singular Ray", "curshader.write('vec3 {0} = {1};'.format(res_var, res)) elif st == 'VALUE': res", "elif node.type == 'VECT_TRANSFORM': #type = node.vector_type #conv_from = node.convert_from", "= parse_value_input(node.inputs[2]) fac = parse_value_input(node.inputs[3]) col = parse_vector_input(node.inputs[4]) return 'hue_sat({0},", "if grad == 'LINEAR': f = '{0}.x'.format(co) elif grad ==", "mix elif blend == 'DODGE': out_col = 'mix({0}, {1}, {2})'.format(col1,", "tex['min_filter'] = 'point' tex['mag_filter'] = 'point' # else defaults to", "node.type == 'BSDF_GLOSSY': if parse_surface: write_normal(node.inputs[2]) out_basecol = parse_vector_input(node.inputs[0]) out_roughness", "Material Index curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex') return 'objectInfoMaterialIndex' elif socket ==", "Pass constant return to_vec3([rgb[0], rgb[1], rgb[2]]) elif node.type == 'VALTORGB':", "return parse_displacement_input(l.from_node.inputs[0]) return parse_vector_input(inp) else: return None def parse_vector_input(inp): if", "= 2 elif(t >= 1167.0): i = 1 else: i", "to_linear = node.image != None and 
node.image.colorspace_settings.name == 'sRGB' res", "'VALTORGB': # ColorRamp fac = parse_value_input(node.inputs[0]) interp = node.color_ramp.interpolation elems", "return '0.0' elif socket == node.outputs[7]: # Ray Length return", "else: i = 0 r = blackbody_table_r[i] g = blackbody_table_g[i]", "Map vector return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}])", "return 'vec3({0}, {1}, {2})'.format(x, y, z) elif node.type == 'VECT_MATH':", "tesc global tese global parse_surface global parse_opacity global basecol_only global", "node.type == 'GROUP': if node.node_tree.name.startswith('Armory PBR'): if parse_surface: # Base", "No uvlayers for Curve lays = mat_user.data.uv_layers # Second uvmap" ]
[ "= np.split(np.array([int(x) for x in self._select_val(\"colors\", key).split(\",\")]), 2) self.ui_pos =", "self._config[\"trapsin\"] if \"trapsin\" in self._custom: self.trapsin.update(self._custom[\"trapsin\"]) self.barbarian = self._config[\"barbarian\"] if", "there is no img available in assets/items\") self.colors = {}", "self._select_val(\"char\", \"battle_command\"), \"casting_frames\": int(self._select_val(\"char\", \"casting_frames\")), \"atk_len_trav\": float(self._select_val(\"char\", \"atk_len_trav\")), \"atk_len_pindle\": float(self._select_val(\"char\",", "if section in self._custom and key in self._custom[section]: return self._custom[section][key]", "section in self._pickit_config: return self._pickit_config[section][key] elif section in self._shop_config: return", "self.sorceress.update(dict(self._custom[\"sorceress\"])) self.hammerdin = self._config[\"hammerdin\"] if \"hammerdin\" in self._custom: self.hammerdin.update(self._custom[\"hammerdin\"]) self.trapsin", "self._select_val(\"char\", \"potion3\"), \"potion4\": self._select_val(\"char\", \"potion4\"), \"belt_rejuv_columns\": int(self._select_val(\"char\", \"belt_rejuv_columns\")), \"belt_hp_columns\": int(self._select_val(\"char\",", "\"melee_min_score\")), } if __name__ == \"__main__\": config = Config(print_warnings=True) #", "not found: {k}\") # Check if any item templates miss", "return self._pickit_config[section][key] elif section in self._shop_config: return self._shop_config[section][key] else: return", "You activated {key} in pickit, but there is no img", "\"gold_trav_only\": bool(int(self._select_val(\"char\", \"gold_trav_only\"))), \"use_merc\": bool(int(self._select_val(\"char\", \"use_merc\"))), \"pre_buff_every_run\": bool(int(self._select_val(\"char\", \"pre_buff_every_run\"))), \"cta_available\":", "a config for filename in os.listdir(f'assets/items'): filename = filename.lower() if", "\"take_rejuv_potion_mana\": float(self._select_val(\"char\", \"take_rejuv_potion_mana\")), 
\"heal_merc\": float(self._select_val(\"char\", \"heal_merc\")), \"heal_rejuv_merc\": float(self._select_val(\"char\", \"heal_rejuv_merc\")), \"chicken\":", "print_warnings, what a hack... here it is, not making the", "= {} for key in self._config[\"routes\"]: self.routes[key] = bool(int(self._select_val(\"routes\", key)))", "\"take_rejuv_potion_mana\")), \"heal_merc\": float(self._select_val(\"char\", \"heal_merc\")), \"heal_rejuv_merc\": float(self._select_val(\"char\", \"heal_rejuv_merc\")), \"chicken\": float(self._select_val(\"char\", \"chicken\")),", "ass self._print_warnings = print_warnings self._config = configparser.ConfigParser() self._config.read('config/params.ini') self._game_config =", "\"stand_still\"), \"force_move\": self._select_val(\"char\", \"force_move\"), \"num_loot_columns\": int(self._select_val(\"char\", \"num_loot_columns\")), \"take_health_potion\": float(self._select_val(\"char\", \"take_health_potion\")),", "\"region_ips\"), \"dclone_hotip\": self._select_val(\"dclone\", \"dclone_hotip\"), } self.routes = {} for key", "in config.items: if not os.path.exists(f\"./assets/items/{k}.png\"): print(f\"Template not found: {k}\") #", "\"take_rejuv_potion_health\": float(self._select_val(\"char\", \"take_rejuv_potion_health\")), \"take_rejuv_potion_mana\": float(self._select_val(\"char\", \"take_rejuv_potion_mana\")), \"heal_merc\": float(self._select_val(\"char\", \"heal_merc\")), \"heal_rejuv_merc\":", "if \"barbarian\" in self._custom: self.barbarian.update(self._custom[\"barbarian\"]) self.advanced_options = { \"pathing_delay_factor\": min(max(int(self._select_val(\"advanced_options\",", "int(self._select_val(\"claws\", \"melee_min_score\")), } if __name__ == \"__main__\": config = Config(print_warnings=True)", "\"resume_key\": self._select_val(\"general\", \"resume_key\"), \"auto_settings_key\": self._select_val(\"general\", \"auto_settings_key\"), \"graphic_debugger_key\": self._select_val(\"general\", \"graphic_debugger_key\"), 
\"logg_lvl\":", "self.char = { \"type\": self._select_val(\"char\", \"type\"), \"show_items\": self._select_val(\"char\", \"show_items\"), \"inventory_screen\":", "in self._custom: self.sorceress.update(dict(self._custom[\"sorceress\"])) self.hammerdin = self._config[\"hammerdin\"] if \"hammerdin\" in self._custom:", "key)) if self.items[key] and not os.path.exists(f\"./assets/items/{key}.png\") and self._print_warnings: print(f\"Warning: You", "float(self._select_val(\"general\", \"max_game_length_s\")), \"exit_key\": self._select_val(\"general\", \"exit_key\"), \"resume_key\": self._select_val(\"general\", \"resume_key\"), \"auto_settings_key\": self._select_val(\"general\",", "\"shop_2_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_2_skills_ias_gloves\"))), \"trap_min_score\": int(self._select_val(\"claws\", \"trap_min_score\")), \"melee_min_score\": int(self._select_val(\"claws\", \"melee_min_score\")), }", "self._custom: self.sorceress.update(dict(self._custom[\"sorceress\"])) self.hammerdin = self._config[\"hammerdin\"] if \"hammerdin\" in self._custom: self.hammerdin.update(self._custom[\"hammerdin\"])", "os.path.exists(f\"./assets/items/{k}.png\"): print(f\"Template not found: {k}\") # Check if any item", "\"chicken\")), \"merc_chicken\": float(self._select_val(\"char\", \"merc_chicken\")), \"tp\": self._select_val(\"char\", \"tp\"), \"belt_rows\": int(self._select_val(\"char\", \"belt_rows\")),", "\"monitor\")), \"max_game_length_s\": float(self._select_val(\"general\", \"max_game_length_s\")), \"exit_key\": self._select_val(\"general\", \"exit_key\"), \"resume_key\": self._select_val(\"general\", \"resume_key\"),", "\"melee_min_score\": int(self._select_val(\"claws\", \"melee_min_score\")), } if __name__ == \"__main__\": config =", "self._config[section][key] elif section in self._pickit_config: return self._pickit_config[section][key] elif section in", "\"shop_melee_claws\"))), \"shop_3_skills_ias_gloves\": 
bool(int(self._select_val(\"gloves\", \"shop_3_skills_ias_gloves\"))), \"shop_2_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_2_skills_ias_gloves\"))), \"trap_min_score\": int(self._select_val(\"claws\", \"trap_min_score\")),", "\"heal_rejuv_merc\")), \"chicken\": float(self._select_val(\"char\", \"chicken\")), \"merc_chicken\": float(self._select_val(\"char\", \"merc_chicken\")), \"tp\": self._select_val(\"char\", \"tp\"),", "\"belt_rows\": int(self._select_val(\"char\", \"belt_rows\")), \"show_belt\": self._select_val(\"char\", \"show_belt\"), \"potion1\": self._select_val(\"char\", \"potion1\"), \"potion2\":", "self._game_config[\"colors\"]: self.colors[key] = np.split(np.array([int(x) for x in self._select_val(\"colors\", key).split(\",\")]), 2)", "\"stand_still\": self._select_val(\"char\", \"stand_still\"), \"force_move\": self._select_val(\"char\", \"force_move\"), \"num_loot_columns\": int(self._select_val(\"char\", \"num_loot_columns\")), \"take_health_potion\":", "for filename in os.listdir(f'assets/items'): filename = filename.lower() if filename.endswith('.png'): item_name", "a single config instance through bites me in the ass", "if \"trapsin\" in self._custom: self.trapsin.update(self._custom[\"trapsin\"]) self.barbarian = self._config[\"barbarian\"] if \"barbarian\"", "\"saved_games_folder\"), \"name\": self._select_val(\"general\", \"name\"), \"monitor\": int(self._select_val(\"general\", \"monitor\")), \"max_game_length_s\": float(self._select_val(\"general\", \"max_game_length_s\")),", "hunting self.dclone = { \"region_ips\": self._select_val(\"dclone\", \"region_ips\"), \"dclone_hotip\": self._select_val(\"dclone\", \"dclone_hotip\"),", "\"heal_rejuv_merc\": float(self._select_val(\"char\", \"heal_rejuv_merc\")), \"chicken\": float(self._select_val(\"char\", \"chicken\")), \"merc_chicken\": float(self._select_val(\"char\", \"merc_chicken\")), \"tp\":", "is, not making the effort # passing a single config", 
"self._select_val(\"char\", \"show_items\"), \"inventory_screen\": self._select_val(\"char\", \"inventory_screen\"), \"stand_still\": self._select_val(\"char\", \"stand_still\"), \"force_move\": self._select_val(\"char\",", "\"potion2\"), \"potion3\": self._select_val(\"char\", \"potion3\"), \"potion4\": self._select_val(\"char\", \"potion4\"), \"belt_rejuv_columns\": int(self._select_val(\"char\", \"belt_rejuv_columns\")),", "float(self._select_val(\"char\", \"hork_time_council\")), \"hork_time_nihlatak\": float(self._select_val(\"char\", \"hork_time_nihlatak\")), } self.sorceress = dict(self._config[\"sorceress\"]) if", "print_warnings self._config = configparser.ConfigParser() self._config.read('config/params.ini') self._game_config = configparser.ConfigParser() self._game_config.read('config/game.ini') self._pickit_config", "any item templates miss a config for filename in os.listdir(f'assets/items'):", "self._select_val(\"char\", \"weapon_switch\"), \"battle_orders\": self._select_val(\"char\", \"battle_orders\"), \"battle_command\": self._select_val(\"char\", \"battle_command\"), \"casting_frames\": int(self._select_val(\"char\",", "\"inventory_screen\"), \"stand_still\": self._select_val(\"char\", \"stand_still\"), \"force_move\": self._select_val(\"char\", \"force_move\"), \"num_loot_columns\": int(self._select_val(\"char\", \"num_loot_columns\")),", "in self._custom[section]: return self._custom[section][key] elif section in self._config: return self._config[section][key]", "# Check if any added items miss templates for k", "in self._game_config[\"colors\"]: self.colors[key] = np.split(np.array([int(x) for x in self._select_val(\"colors\", key).split(\",\")]),", "float(self._select_val(\"char\", \"merc_chicken\")), \"tp\": self._select_val(\"char\", \"tp\"), \"belt_rows\": int(self._select_val(\"char\", \"belt_rows\")), \"show_belt\": self._select_val(\"char\",", "self._select_val(\"general\", \"exit_key\"), \"resume_key\": self._select_val(\"general\", 
\"resume_key\"), \"auto_settings_key\": self._select_val(\"general\", \"auto_settings_key\"), \"graphic_debugger_key\": self._select_val(\"general\",", "self._select_val(\"char\", \"force_move\"), \"num_loot_columns\": int(self._select_val(\"char\", \"num_loot_columns\")), \"take_health_potion\": float(self._select_val(\"char\", \"take_health_potion\")), \"take_mana_potion\": float(self._select_val(\"char\",", "elif section in self._config: return self._config[section][key] elif section in self._pickit_config:", "key in self._game_config[\"ui_pos\"]: self.ui_pos[key] = int(self._select_val(\"ui_pos\", key)) self.ui_roi = {}", "{ \"region_ips\": self._select_val(\"dclone\", \"region_ips\"), \"dclone_hotip\": self._select_val(\"dclone\", \"dclone_hotip\"), } self.routes =", "in self._shop_config: return self._shop_config[section][key] else: return self._game_config[section][key] def __init__(self, print_warnings:", "# Check if any item templates miss a config for", "\"hork_time_council\": float(self._select_val(\"char\", \"hork_time_council\")), \"hork_time_nihlatak\": float(self._select_val(\"char\", \"hork_time_nihlatak\")), } self.sorceress = dict(self._config[\"sorceress\"])", "self._select_val(\"dclone\", \"region_ips\"), \"dclone_hotip\": self._select_val(\"dclone\", \"dclone_hotip\"), } self.routes = {} for", "if any item templates miss a config for filename in", "for x in self._select_val(\"ui_roi\", key).split(\",\")]) self.path = {} for key", "configparser.ConfigParser() if os.environ.get('RUN_ENV') != \"test\" and os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini') self.general =", "float(self._select_val(\"char\", \"take_health_potion\")), \"take_mana_potion\": float(self._select_val(\"char\", \"take_mana_potion\")), \"take_rejuv_potion_health\": float(self._select_val(\"char\", \"take_rejuv_potion_health\")), \"take_rejuv_potion_mana\": float(self._select_val(\"char\",", "self._select_val(\"char\", \"tp\"), \"belt_rows\": 
int(self._select_val(\"char\", \"belt_rows\")), \"show_belt\": self._select_val(\"char\", \"show_belt\"), \"potion1\": self._select_val(\"char\",", "self._game_config[\"ui_roi\"]: self.ui_roi[key] = np.array([int(x) for x in self._select_val(\"ui_roi\", key).split(\",\")]) self.path", "float(self._select_val(\"char\", \"heal_rejuv_merc\")), \"chicken\": float(self._select_val(\"char\", \"chicken\")), \"merc_chicken\": float(self._select_val(\"char\", \"merc_chicken\")), \"tp\": self._select_val(\"char\",", "import numpy as np import os class Config: def _select_val(self,", "int(self._select_val(\"claws\", \"trap_min_score\")), \"melee_min_score\": int(self._select_val(\"claws\", \"melee_min_score\")), } if __name__ == \"__main__\":", "\"show_items\": self._select_val(\"char\", \"show_items\"), \"inventory_screen\": self._select_val(\"char\", \"inventory_screen\"), \"stand_still\": self._select_val(\"char\", \"stand_still\"), \"force_move\":", "= dict(self._config[\"sorceress\"]) if \"sorceress\" in self._custom: self.sorceress.update(dict(self._custom[\"sorceress\"])) self.hammerdin = self._config[\"hammerdin\"]", "np.reshape(np.array([int(x) for x in self._select_val(\"path\", key).split(\",\")]), (-1, 2)) self.shop =", "self.colors[key] = np.split(np.array([int(x) for x in self._select_val(\"colors\", key).split(\",\")]), 2) self.ui_pos", "= False): # print_warnings, what a hack... 
here it is,", "any added items miss templates for k in config.items: if", "min(max(int(self._select_val(\"advanced_options\", \"pathing_delay_factor\")), 1), 10), \"message_headers\": self._select_val(\"advanced_options\", \"message_headers\"), \"message_body_template\": self._select_val(\"advanced_options\", \"message_body_template\"),", "for key in self._game_config[\"path\"]: self.path[key] = np.reshape(np.array([int(x) for x in", "\"force_move\"), \"num_loot_columns\": int(self._select_val(\"char\", \"num_loot_columns\")), \"take_health_potion\": float(self._select_val(\"char\", \"take_health_potion\")), \"take_mana_potion\": float(self._select_val(\"char\", \"take_mana_potion\")),", "self._custom: self.hammerdin.update(self._custom[\"hammerdin\"]) self.trapsin = self._config[\"trapsin\"] if \"trapsin\" in self._custom: self.trapsin.update(self._custom[\"trapsin\"])", "in os.listdir(f'assets/items'): filename = filename.lower() if filename.endswith('.png'): item_name = filename[:-4]", "hack... 
here it is, not making the effort # passing", "in self._game_config[\"ui_roi\"]: self.ui_roi[key] = np.array([int(x) for x in self._select_val(\"ui_roi\", key).split(\",\")])", "\"atk_len_shenk\")), \"atk_len_nihlatak\": float(self._select_val(\"char\", \"atk_len_nihlatak\")), \"hork_time_pindle\": float(self._select_val(\"char\", \"hork_time_pindle\")), \"hork_time_eldritch\": float(self._select_val(\"char\", \"hork_time_eldritch\")),", "self._shop_config = configparser.ConfigParser() self._shop_config.read('config/shop.ini') self._custom = configparser.ConfigParser() if os.environ.get('RUN_ENV') !=", "if \"sorceress\" in self._custom: self.sorceress.update(dict(self._custom[\"sorceress\"])) self.hammerdin = self._config[\"hammerdin\"] if \"hammerdin\"", "\"trap_min_score\": int(self._select_val(\"claws\", \"trap_min_score\")), \"melee_min_score\": int(self._select_val(\"claws\", \"melee_min_score\")), } if __name__ ==", "me in the ass self._print_warnings = print_warnings self._config = configparser.ConfigParser()", "key))) self.char = { \"type\": self._select_val(\"char\", \"type\"), \"show_items\": self._select_val(\"char\", \"show_items\"),", "\"info_screenshots\"))), \"loot_screenshots\": bool(int(self._select_val(\"general\", \"loot_screenshots\"))), } # Added for dclone ip", "what a hack... 
here it is, not making the effort", "\"hork_time_shenk\")), \"hork_time_council\": float(self._select_val(\"char\", \"hork_time_council\")), \"hork_time_nihlatak\": float(self._select_val(\"char\", \"hork_time_nihlatak\")), } self.sorceress =", "int(self._select_val(\"char\", \"num_loot_columns\")), \"take_health_potion\": float(self._select_val(\"char\", \"take_health_potion\")), \"take_mana_potion\": float(self._select_val(\"char\", \"take_mana_potion\")), \"take_rejuv_potion_health\": float(self._select_val(\"char\",", "= configparser.ConfigParser() self._config.read('config/params.ini') self._game_config = configparser.ConfigParser() self._game_config.read('config/game.ini') self._pickit_config = configparser.ConfigParser()", "float(self._select_val(\"char\", \"chicken\")), \"merc_chicken\": float(self._select_val(\"char\", \"merc_chicken\")), \"tp\": self._select_val(\"char\", \"tp\"), \"belt_rows\": int(self._select_val(\"char\",", "= self._config[\"barbarian\"] if \"barbarian\" in self._custom: self.barbarian.update(self._custom[\"barbarian\"]) self.advanced_options = {", "= int(self._select_val(\"items\", key)) if self.items[key] and not os.path.exists(f\"./assets/items/{key}.png\") and self._print_warnings:", "\"max_game_length_s\": float(self._select_val(\"general\", \"max_game_length_s\")), \"exit_key\": self._select_val(\"general\", \"exit_key\"), \"resume_key\": self._select_val(\"general\", \"resume_key\"), \"auto_settings_key\":", "self.path[key] = np.reshape(np.array([int(x) for x in self._select_val(\"path\", key).split(\",\")]), (-1, 2))", "float(self._select_val(\"char\", \"hork_time_pindle\")), \"hork_time_eldritch\": float(self._select_val(\"char\", \"hork_time_eldritch\")), \"hork_time_shenk\": float(self._select_val(\"char\", \"hork_time_shenk\")), \"hork_time_council\": float(self._select_val(\"char\",", "= Config(print_warnings=True) # Check if any added items miss templates", "in self._custom: self.trapsin.update(self._custom[\"trapsin\"]) 
self.barbarian = self._config[\"barbarian\"] if \"barbarian\" in self._custom:", "float(self._select_val(\"char\", \"hork_time_eldritch\")), \"hork_time_shenk\": float(self._select_val(\"char\", \"hork_time_shenk\")), \"hork_time_council\": float(self._select_val(\"char\", \"hork_time_council\")), \"hork_time_nihlatak\": float(self._select_val(\"char\",", "os.path.exists(f\"./assets/items/{key}.png\") and self._print_warnings: print(f\"Warning: You activated {key} in pickit, but", "\"auto_settings_key\": self._select_val(\"general\", \"auto_settings_key\"), \"graphic_debugger_key\": self._select_val(\"general\", \"graphic_debugger_key\"), \"logg_lvl\": self._select_val(\"general\", \"logg_lvl\"), \"randomize_runs\":", "= bool(int(self._select_val(\"routes\", key))) self.char = { \"type\": self._select_val(\"char\", \"type\"), \"show_items\":", "float(self._select_val(\"char\", \"take_rejuv_potion_health\")), \"take_rejuv_potion_mana\": float(self._select_val(\"char\", \"take_rejuv_potion_mana\")), \"heal_merc\": float(self._select_val(\"char\", \"heal_merc\")), \"heal_rejuv_merc\": float(self._select_val(\"char\",", "\"saved_games_folder\": self._select_val(\"general\", \"saved_games_folder\"), \"name\": self._select_val(\"general\", \"name\"), \"monitor\": int(self._select_val(\"general\", \"monitor\")), \"max_game_length_s\":", "\"belt_hp_columns\")), \"belt_mp_columns\": int(self._select_val(\"char\", \"belt_mp_columns\")), \"stash_gold\": bool(int(self._select_val(\"char\", \"stash_gold\"))), \"gold_trav_only\": bool(int(self._select_val(\"char\", \"gold_trav_only\"))),", "self._select_val(\"general\", \"custom_message_hook\"), \"discord_status_count\": False if not self._select_val(\"general\", \"discord_status_count\") else int(self._select_val(\"general\",", "\"atk_len_nihlatak\")), \"hork_time_pindle\": float(self._select_val(\"char\", \"hork_time_pindle\")), \"hork_time_eldritch\": float(self._select_val(\"char\", \"hork_time_eldritch\")), 
\"hork_time_shenk\": float(self._select_val(\"char\", \"hork_time_shenk\")),", "item_name = filename[:-4] blacklist_item = item_name.startswith(\"bl__\") if item_name not in", "for x in self._select_val(\"path\", key).split(\",\")]), (-1, 2)) self.shop = {", "float(self._select_val(\"char\", \"atk_len_shenk\")), \"atk_len_nihlatak\": float(self._select_val(\"char\", \"atk_len_nihlatak\")), \"hork_time_pindle\": float(self._select_val(\"char\", \"hork_time_pindle\")), \"hork_time_eldritch\": float(self._select_val(\"char\",", "configparser.ConfigParser() self._shop_config.read('config/shop.ini') self._custom = configparser.ConfigParser() if os.environ.get('RUN_ENV') != \"test\" and", "# print_warnings, what a hack... here it is, not making", "self.hammerdin.update(self._custom[\"hammerdin\"]) self.trapsin = self._config[\"trapsin\"] if \"trapsin\" in self._custom: self.trapsin.update(self._custom[\"trapsin\"]) self.barbarian", "bool(int(self._select_val(\"claws\", \"shop_melee_claws\"))), \"shop_3_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_3_skills_ias_gloves\"))), \"shop_2_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_2_skills_ias_gloves\"))), \"trap_min_score\": int(self._select_val(\"claws\",", "= { \"region_ips\": self._select_val(\"dclone\", \"region_ips\"), \"dclone_hotip\": self._select_val(\"dclone\", \"dclone_hotip\"), } self.routes", "instance through bites me in the ass self._print_warnings = print_warnings", "int(self._select_val(\"ui_pos\", key)) self.ui_roi = {} for key in self._game_config[\"ui_roi\"]: self.ui_roi[key]", "items miss templates for k in config.items: if not os.path.exists(f\"./assets/items/{k}.png\"):", "\"potion3\": self._select_val(\"char\", \"potion3\"), \"potion4\": self._select_val(\"char\", \"potion4\"), \"belt_rejuv_columns\": int(self._select_val(\"char\", \"belt_rejuv_columns\")), \"belt_hp_columns\":", "Check if any item templates miss a config for filename", "\"hammerdin\" in 
self._custom: self.hammerdin.update(self._custom[\"hammerdin\"]) self.trapsin = self._config[\"trapsin\"] if \"trapsin\" in", "miss a config for filename in os.listdir(f'assets/items'): filename = filename.lower()", "\"merc_chicken\")), \"tp\": self._select_val(\"char\", \"tp\"), \"belt_rows\": int(self._select_val(\"char\", \"belt_rows\")), \"show_belt\": self._select_val(\"char\", \"show_belt\"),", "self._pickit_config[\"items\"]: self.items[key] = int(self._select_val(\"items\", key)) if self.items[key] and not os.path.exists(f\"./assets/items/{key}.png\")", "self.general = { \"saved_games_folder\": self._select_val(\"general\", \"saved_games_folder\"), \"name\": self._select_val(\"general\", \"name\"), \"monitor\":", "\"use_merc\"))), \"pre_buff_every_run\": bool(int(self._select_val(\"char\", \"pre_buff_every_run\"))), \"cta_available\": bool(int(self._select_val(\"char\", \"cta_available\"))), \"weapon_switch\": self._select_val(\"char\", \"weapon_switch\"),", "\"hork_time_eldritch\": float(self._select_val(\"char\", \"hork_time_eldritch\")), \"hork_time_shenk\": float(self._select_val(\"char\", \"hork_time_shenk\")), \"hork_time_council\": float(self._select_val(\"char\", \"hork_time_council\")), \"hork_time_nihlatak\":", "\"difficulty\": self._select_val(\"general\", \"difficulty\"), \"custom_message_hook\": self._select_val(\"general\", \"custom_message_hook\"), \"discord_status_count\": False if not", "# passing a single config instance through bites me in", "\"belt_rejuv_columns\": int(self._select_val(\"char\", \"belt_rejuv_columns\")), \"belt_hp_columns\": int(self._select_val(\"char\", \"belt_hp_columns\")), \"belt_mp_columns\": int(self._select_val(\"char\", \"belt_mp_columns\")), \"stash_gold\":", "{ \"type\": self._select_val(\"char\", \"type\"), \"show_items\": self._select_val(\"char\", \"show_items\"), \"inventory_screen\": self._select_val(\"char\", \"inventory_screen\"),", "\"cta_available\": bool(int(self._select_val(\"char\", 
\"cta_available\"))), \"weapon_switch\": self._select_val(\"char\", \"weapon_switch\"), \"battle_orders\": self._select_val(\"char\", \"battle_orders\"), \"battle_command\":", "if not os.path.exists(f\"./assets/items/{k}.png\"): print(f\"Template not found: {k}\") # Check if", "self._select_val(\"general\", \"saved_games_folder\"), \"name\": self._select_val(\"general\", \"name\"), \"monitor\": int(self._select_val(\"general\", \"monitor\")), \"max_game_length_s\": float(self._select_val(\"general\",", "\"heal_merc\": float(self._select_val(\"char\", \"heal_merc\")), \"heal_rejuv_merc\": float(self._select_val(\"char\", \"heal_rejuv_merc\")), \"chicken\": float(self._select_val(\"char\", \"chicken\")), \"merc_chicken\":", "section: str, key: str = None): if section in self._custom", "for k in config.items: if not os.path.exists(f\"./assets/items/{k}.png\"): print(f\"Template not found:", "= np.array([int(x) for x in self._select_val(\"ui_roi\", key).split(\",\")]) self.path = {}", "\"discord_status_count\")), \"info_screenshots\": bool(int(self._select_val(\"general\", \"info_screenshots\"))), \"loot_screenshots\": bool(int(self._select_val(\"general\", \"loot_screenshots\"))), } # Added", "\"logg_lvl\"), \"randomize_runs\": bool(int(self._select_val(\"general\", \"randomize_runs\"))), \"difficulty\": self._select_val(\"general\", \"difficulty\"), \"custom_message_hook\": self._select_val(\"general\", \"custom_message_hook\"),", "\"shop_3_skills_ias_gloves\"))), \"shop_2_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_2_skills_ias_gloves\"))), \"trap_min_score\": int(self._select_val(\"claws\", \"trap_min_score\")), \"melee_min_score\": int(self._select_val(\"claws\", \"melee_min_score\")),", "= int(self._select_val(\"ui_pos\", key)) self.ui_roi = {} for key in self._game_config[\"ui_roi\"]:", "in assets/items\") self.colors = {} for key in self._game_config[\"colors\"]: self.colors[key]", "if self.items[key] and not 
os.path.exists(f\"./assets/items/{key}.png\") and self._print_warnings: print(f\"Warning: You activated", "self._shop_config.read('config/shop.ini') self._custom = configparser.ConfigParser() if os.environ.get('RUN_ENV') != \"test\" and os.path.exists('config/custom.ini'):", "float(self._select_val(\"char\", \"hork_time_shenk\")), \"hork_time_council\": float(self._select_val(\"char\", \"hork_time_council\")), \"hork_time_nihlatak\": float(self._select_val(\"char\", \"hork_time_nihlatak\")), } self.sorceress", "int(self._select_val(\"items\", key)) if self.items[key] and not os.path.exists(f\"./assets/items/{key}.png\") and self._print_warnings: print(f\"Warning:", "self._config[\"hammerdin\"] if \"hammerdin\" in self._custom: self.hammerdin.update(self._custom[\"hammerdin\"]) self.trapsin = self._config[\"trapsin\"] if", "{} for key in self._game_config[\"path\"]: self.path[key] = np.reshape(np.array([int(x) for x", "making the effort # passing a single config instance through", "key in self._config[\"routes\"]: self.routes[key] = bool(int(self._select_val(\"routes\", key))) self.char = {", "self._config: return self._config[section][key] elif section in self._pickit_config: return self._pickit_config[section][key] elif", "\"belt_rows\")), \"show_belt\": self._select_val(\"char\", \"show_belt\"), \"potion1\": self._select_val(\"char\", \"potion1\"), \"potion2\": self._select_val(\"char\", \"potion2\"),", "== \"__main__\": config = Config(print_warnings=True) # Check if any added", "\"num_loot_columns\")), \"take_health_potion\": float(self._select_val(\"char\", \"take_health_potion\")), \"take_mana_potion\": float(self._select_val(\"char\", \"take_mana_potion\")), \"take_rejuv_potion_health\": float(self._select_val(\"char\", \"take_rejuv_potion_health\")),", "configparser.ConfigParser() self._config.read('config/params.ini') self._game_config = configparser.ConfigParser() self._game_config.read('config/game.ini') self._pickit_config = configparser.ConfigParser() 
self._pickit_config.read('config/pickit.ini')", "\"battle_command\": self._select_val(\"char\", \"battle_command\"), \"casting_frames\": int(self._select_val(\"char\", \"casting_frames\")), \"atk_len_trav\": float(self._select_val(\"char\", \"atk_len_trav\")), \"atk_len_pindle\":", "self._game_config = configparser.ConfigParser() self._game_config.read('config/game.ini') self._pickit_config = configparser.ConfigParser() self._pickit_config.read('config/pickit.ini') self._shop_config =", "\"belt_mp_columns\": int(self._select_val(\"char\", \"belt_mp_columns\")), \"stash_gold\": bool(int(self._select_val(\"char\", \"stash_gold\"))), \"gold_trav_only\": bool(int(self._select_val(\"char\", \"gold_trav_only\"))), \"use_merc\":", "filename.endswith('.png'): item_name = filename[:-4] blacklist_item = item_name.startswith(\"bl__\") if item_name not", "bool(int(self._select_val(\"gloves\", \"shop_2_skills_ias_gloves\"))), \"trap_min_score\": int(self._select_val(\"claws\", \"trap_min_score\")), \"melee_min_score\": int(self._select_val(\"claws\", \"melee_min_score\")), } if", "through bites me in the ass self._print_warnings = print_warnings self._config", "bool = False): # print_warnings, what a hack... 
here it", "bool(int(self._select_val(\"routes\", key))) self.char = { \"type\": self._select_val(\"char\", \"type\"), \"show_items\": self._select_val(\"char\",", "= self._config[\"hammerdin\"] if \"hammerdin\" in self._custom: self.hammerdin.update(self._custom[\"hammerdin\"]) self.trapsin = self._config[\"trapsin\"]", "in self._custom: self.barbarian.update(self._custom[\"barbarian\"]) self.advanced_options = { \"pathing_delay_factor\": min(max(int(self._select_val(\"advanced_options\", \"pathing_delay_factor\")), 1),", "\"auto_settings_key\"), \"graphic_debugger_key\": self._select_val(\"general\", \"graphic_debugger_key\"), \"logg_lvl\": self._select_val(\"general\", \"logg_lvl\"), \"randomize_runs\": bool(int(self._select_val(\"general\", \"randomize_runs\"))),", "\"shop_trap_claws\"))), \"shop_melee_claws\": bool(int(self._select_val(\"claws\", \"shop_melee_claws\"))), \"shop_3_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_3_skills_ias_gloves\"))), \"shop_2_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_2_skills_ias_gloves\"))),", "passing a single config instance through bites me in the", "dict(self._config[\"sorceress\"]) if \"sorceress\" in self._custom: self.sorceress.update(dict(self._custom[\"sorceress\"])) self.hammerdin = self._config[\"hammerdin\"] if", "in the ass self._print_warnings = print_warnings self._config = configparser.ConfigParser() self._config.read('config/params.ini')", "in pickit, but there is no img available in assets/items\")", "self._custom.read('config/custom.ini') self.general = { \"saved_games_folder\": self._select_val(\"general\", \"saved_games_folder\"), \"name\": self._select_val(\"general\", \"name\"),", "bool(int(self._select_val(\"general\", \"randomize_runs\"))), \"difficulty\": self._select_val(\"general\", \"difficulty\"), \"custom_message_hook\": self._select_val(\"general\", \"custom_message_hook\"), \"discord_status_count\": False", "self._select_val(\"char\", \"type\"), 
\"show_items\": self._select_val(\"char\", \"show_items\"), \"inventory_screen\": self._select_val(\"char\", \"inventory_screen\"), \"stand_still\": self._select_val(\"char\",", "{ \"saved_games_folder\": self._select_val(\"general\", \"saved_games_folder\"), \"name\": self._select_val(\"general\", \"name\"), \"monitor\": int(self._select_val(\"general\", \"monitor\")),", "x in self._select_val(\"path\", key).split(\",\")]), (-1, 2)) self.shop = { \"shop_trap_claws\":", "self._shop_config[section][key] else: return self._game_config[section][key] def __init__(self, print_warnings: bool = False):", "available in assets/items\") self.colors = {} for key in self._game_config[\"colors\"]:", "and not os.path.exists(f\"./assets/items/{key}.png\") and self._print_warnings: print(f\"Warning: You activated {key} in", "activated {key} in pickit, but there is no img available", "\"inventory_screen\": self._select_val(\"char\", \"inventory_screen\"), \"stand_still\": self._select_val(\"char\", \"stand_still\"), \"force_move\": self._select_val(\"char\", \"force_move\"), \"num_loot_columns\":", "single config instance through bites me in the ass self._print_warnings", "self._config[\"barbarian\"] if \"barbarian\" in self._custom: self.barbarian.update(self._custom[\"barbarian\"]) self.advanced_options = { \"pathing_delay_factor\":", "\"dclone_hotip\"), } self.routes = {} for key in self._config[\"routes\"]: self.routes[key]", "\"weapon_switch\": self._select_val(\"char\", \"weapon_switch\"), \"battle_orders\": self._select_val(\"char\", \"battle_orders\"), \"battle_command\": self._select_val(\"char\", \"battle_command\"), \"casting_frames\":", "\"battle_orders\": self._select_val(\"char\", \"battle_orders\"), \"battle_command\": self._select_val(\"char\", \"battle_command\"), \"casting_frames\": int(self._select_val(\"char\", \"casting_frames\")), \"atk_len_trav\":", "print_warnings: bool = False): # print_warnings, what a hack... 
here", "\"stash_gold\": bool(int(self._select_val(\"char\", \"stash_gold\"))), \"gold_trav_only\": bool(int(self._select_val(\"char\", \"gold_trav_only\"))), \"use_merc\": bool(int(self._select_val(\"char\", \"use_merc\"))), \"pre_buff_every_run\":", "if not self._select_val(\"general\", \"discord_status_count\") else int(self._select_val(\"general\", \"discord_status_count\")), \"info_screenshots\": bool(int(self._select_val(\"general\", \"info_screenshots\"))),", "bool(int(self._select_val(\"char\", \"pre_buff_every_run\"))), \"cta_available\": bool(int(self._select_val(\"char\", \"cta_available\"))), \"weapon_switch\": self._select_val(\"char\", \"weapon_switch\"), \"battle_orders\": self._select_val(\"char\",", "float(self._select_val(\"char\", \"atk_len_nihlatak\")), \"hork_time_pindle\": float(self._select_val(\"char\", \"hork_time_pindle\")), \"hork_time_eldritch\": float(self._select_val(\"char\", \"hork_time_eldritch\")), \"hork_time_shenk\": float(self._select_val(\"char\",", "int(self._select_val(\"char\", \"belt_mp_columns\")), \"stash_gold\": bool(int(self._select_val(\"char\", \"stash_gold\"))), \"gold_trav_only\": bool(int(self._select_val(\"char\", \"gold_trav_only\"))), \"use_merc\": bool(int(self._select_val(\"char\",", "self.ui_roi = {} for key in self._game_config[\"ui_roi\"]: self.ui_roi[key] = np.array([int(x)", "self.routes = {} for key in self._config[\"routes\"]: self.routes[key] = bool(int(self._select_val(\"routes\",", "and self._print_warnings: print(f\"Warning: You activated {key} in pickit, but there", "\"take_mana_potion\")), \"take_rejuv_potion_health\": float(self._select_val(\"char\", \"take_rejuv_potion_health\")), \"take_rejuv_potion_mana\": float(self._select_val(\"char\", \"take_rejuv_potion_mana\")), \"heal_merc\": float(self._select_val(\"char\", \"heal_merc\")),", "float(self._select_val(\"char\", \"take_mana_potion\")), \"take_rejuv_potion_health\": float(self._select_val(\"char\", \"take_rejuv_potion_health\")), 
\"take_rejuv_potion_mana\": float(self._select_val(\"char\", \"take_rejuv_potion_mana\")), \"heal_merc\": float(self._select_val(\"char\",", "\"hork_time_shenk\": float(self._select_val(\"char\", \"hork_time_shenk\")), \"hork_time_council\": float(self._select_val(\"char\", \"hork_time_council\")), \"hork_time_nihlatak\": float(self._select_val(\"char\", \"hork_time_nihlatak\")), }", "\"__main__\": config = Config(print_warnings=True) # Check if any added items", "blacklist_item = item_name.startswith(\"bl__\") if item_name not in config.items and not", "self._select_val(\"general\", \"discord_status_count\") else int(self._select_val(\"general\", \"discord_status_count\")), \"info_screenshots\": bool(int(self._select_val(\"general\", \"info_screenshots\"))), \"loot_screenshots\": bool(int(self._select_val(\"general\",", "\"heal_merc\")), \"heal_rejuv_merc\": float(self._select_val(\"char\", \"heal_rejuv_merc\")), \"chicken\": float(self._select_val(\"char\", \"chicken\")), \"merc_chicken\": float(self._select_val(\"char\", \"merc_chicken\")),", "os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini') self.general = { \"saved_games_folder\": self._select_val(\"general\", \"saved_games_folder\"), \"name\": self._select_val(\"general\",", "key).split(\",\")]), 2) self.ui_pos = {} for key in self._game_config[\"ui_pos\"]: self.ui_pos[key]", "in config.items and not blacklist_item: print(f\"Config not found for: \"", "False): # print_warnings, what a hack... 
here it is, not", "self._select_val(\"colors\", key).split(\",\")]), 2) self.ui_pos = {} for key in self._game_config[\"ui_pos\"]:", "in self._custom and key in self._custom[section]: return self._custom[section][key] elif section", "in self._select_val(\"path\", key).split(\",\")]), (-1, 2)) self.shop = { \"shop_trap_claws\": bool(int(self._select_val(\"claws\",", "\"message_highlight\": bool(int(self._select_val(\"advanced_options\", \"message_highlight\"))), } self.items = {} for key in", "else: return self._game_config[section][key] def __init__(self, print_warnings: bool = False): #", "import os class Config: def _select_val(self, section: str, key: str", "if item_name not in config.items and not blacklist_item: print(f\"Config not", "self.dclone = { \"region_ips\": self._select_val(\"dclone\", \"region_ips\"), \"dclone_hotip\": self._select_val(\"dclone\", \"dclone_hotip\"), }", "item_name.startswith(\"bl__\") if item_name not in config.items and not blacklist_item: print(f\"Config", "self.hammerdin = self._config[\"hammerdin\"] if \"hammerdin\" in self._custom: self.hammerdin.update(self._custom[\"hammerdin\"]) self.trapsin =", "= { \"saved_games_folder\": self._select_val(\"general\", \"saved_games_folder\"), \"name\": self._select_val(\"general\", \"name\"), \"monitor\": int(self._select_val(\"general\",", "self._select_val(\"char\", \"battle_orders\"), \"battle_command\": self._select_val(\"char\", \"battle_command\"), \"casting_frames\": int(self._select_val(\"char\", \"casting_frames\")), \"atk_len_trav\": float(self._select_val(\"char\",", "in self._config[\"routes\"]: self.routes[key] = bool(int(self._select_val(\"routes\", key))) self.char = { \"type\":", "= {} for key in self._game_config[\"ui_pos\"]: self.ui_pos[key] = int(self._select_val(\"ui_pos\", key))", "for key in self._config[\"routes\"]: self.routes[key] = bool(int(self._select_val(\"routes\", key))) self.char =", "Added for dclone ip hunting self.dclone = { \"region_ips\": 
self._select_val(\"dclone\",", "10), \"message_headers\": self._select_val(\"advanced_options\", \"message_headers\"), \"message_body_template\": self._select_val(\"advanced_options\", \"message_body_template\"), \"message_highlight\": bool(int(self._select_val(\"advanced_options\", \"message_highlight\"))),", "self.colors = {} for key in self._game_config[\"colors\"]: self.colors[key] = np.split(np.array([int(x)", "print(f\"Template not found: {k}\") # Check if any item templates", "\"discord_status_count\") else int(self._select_val(\"general\", \"discord_status_count\")), \"info_screenshots\": bool(int(self._select_val(\"general\", \"info_screenshots\"))), \"loot_screenshots\": bool(int(self._select_val(\"general\", \"loot_screenshots\"))),", "int(self._select_val(\"general\", \"monitor\")), \"max_game_length_s\": float(self._select_val(\"general\", \"max_game_length_s\")), \"exit_key\": self._select_val(\"general\", \"exit_key\"), \"resume_key\": self._select_val(\"general\",", "self._config[\"routes\"]: self.routes[key] = bool(int(self._select_val(\"routes\", key))) self.char = { \"type\": self._select_val(\"char\",", "self._custom: self.trapsin.update(self._custom[\"trapsin\"]) self.barbarian = self._config[\"barbarian\"] if \"barbarian\" in self._custom: self.barbarian.update(self._custom[\"barbarian\"])", "\"atk_len_trav\": float(self._select_val(\"char\", \"atk_len_trav\")), \"atk_len_pindle\": float(self._select_val(\"char\", \"atk_len_pindle\")), \"atk_len_eldritch\": float(self._select_val(\"char\", \"atk_len_eldritch\")), \"atk_len_shenk\":", "self._select_val(\"char\", \"stand_still\"), \"force_move\": self._select_val(\"char\", \"force_move\"), \"num_loot_columns\": int(self._select_val(\"char\", \"num_loot_columns\")), \"take_health_potion\": float(self._select_val(\"char\",", "found: {k}\") # Check if any item templates miss a", "self._select_val(\"general\", \"graphic_debugger_key\"), \"logg_lvl\": self._select_val(\"general\", \"logg_lvl\"), 
\"randomize_runs\": bool(int(self._select_val(\"general\", \"randomize_runs\"))), \"difficulty\": self._select_val(\"general\",", "filename.lower() if filename.endswith('.png'): item_name = filename[:-4] blacklist_item = item_name.startswith(\"bl__\") if", "int(self._select_val(\"char\", \"casting_frames\")), \"atk_len_trav\": float(self._select_val(\"char\", \"atk_len_trav\")), \"atk_len_pindle\": float(self._select_val(\"char\", \"atk_len_pindle\")), \"atk_len_eldritch\": float(self._select_val(\"char\",", "filename[:-4] blacklist_item = item_name.startswith(\"bl__\") if item_name not in config.items and", "{key} in pickit, but there is no img available in", "\"battle_command\"), \"casting_frames\": int(self._select_val(\"char\", \"casting_frames\")), \"atk_len_trav\": float(self._select_val(\"char\", \"atk_len_trav\")), \"atk_len_pindle\": float(self._select_val(\"char\", \"atk_len_pindle\")),", "configparser import numpy as np import os class Config: def", "np.split(np.array([int(x) for x in self._select_val(\"colors\", key).split(\",\")]), 2) self.ui_pos = {}", "bool(int(self._select_val(\"char\", \"stash_gold\"))), \"gold_trav_only\": bool(int(self._select_val(\"char\", \"gold_trav_only\"))), \"use_merc\": bool(int(self._select_val(\"char\", \"use_merc\"))), \"pre_buff_every_run\": bool(int(self._select_val(\"char\",", "= { \"pathing_delay_factor\": min(max(int(self._select_val(\"advanced_options\", \"pathing_delay_factor\")), 1), 10), \"message_headers\": self._select_val(\"advanced_options\", \"message_headers\"),", "__init__(self, print_warnings: bool = False): # print_warnings, what a hack...", "bool(int(self._select_val(\"general\", \"loot_screenshots\"))), } # Added for dclone ip hunting self.dclone", "def __init__(self, print_warnings: bool = False): # print_warnings, what a", "\"test\" and os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini') self.general = { \"saved_games_folder\": self._select_val(\"general\", 
\"saved_games_folder\"),", "configparser.ConfigParser() self._pickit_config.read('config/pickit.ini') self._shop_config = configparser.ConfigParser() self._shop_config.read('config/shop.ini') self._custom = configparser.ConfigParser() if", "\"belt_mp_columns\")), \"stash_gold\": bool(int(self._select_val(\"char\", \"stash_gold\"))), \"gold_trav_only\": bool(int(self._select_val(\"char\", \"gold_trav_only\"))), \"use_merc\": bool(int(self._select_val(\"char\", \"use_merc\"))),", "\"potion1\"), \"potion2\": self._select_val(\"char\", \"potion2\"), \"potion3\": self._select_val(\"char\", \"potion3\"), \"potion4\": self._select_val(\"char\", \"potion4\"),", "float(self._select_val(\"char\", \"heal_merc\")), \"heal_rejuv_merc\": float(self._select_val(\"char\", \"heal_rejuv_merc\")), \"chicken\": float(self._select_val(\"char\", \"chicken\")), \"merc_chicken\": float(self._select_val(\"char\",", "self.barbarian.update(self._custom[\"barbarian\"]) self.advanced_options = { \"pathing_delay_factor\": min(max(int(self._select_val(\"advanced_options\", \"pathing_delay_factor\")), 1), 10), \"message_headers\":", "templates miss a config for filename in os.listdir(f'assets/items'): filename =", "\"info_screenshots\": bool(int(self._select_val(\"general\", \"info_screenshots\"))), \"loot_screenshots\": bool(int(self._select_val(\"general\", \"loot_screenshots\"))), } # Added for", "# Added for dclone ip hunting self.dclone = { \"region_ips\":", "self._pickit_config[section][key] elif section in self._shop_config: return self._shop_config[section][key] else: return self._game_config[section][key]", "\"cta_available\"))), \"weapon_switch\": self._select_val(\"char\", \"weapon_switch\"), \"battle_orders\": self._select_val(\"char\", \"battle_orders\"), \"battle_command\": self._select_val(\"char\", \"battle_command\"),", "int(self._select_val(\"general\", \"discord_status_count\")), \"info_screenshots\": bool(int(self._select_val(\"general\", \"info_screenshots\"))), 
\"loot_screenshots\": bool(int(self._select_val(\"general\", \"loot_screenshots\"))), } #", "\"gold_trav_only\"))), \"use_merc\": bool(int(self._select_val(\"char\", \"use_merc\"))), \"pre_buff_every_run\": bool(int(self._select_val(\"char\", \"pre_buff_every_run\"))), \"cta_available\": bool(int(self._select_val(\"char\", \"cta_available\"))),", "{} for key in self._game_config[\"ui_pos\"]: self.ui_pos[key] = int(self._select_val(\"ui_pos\", key)) self.ui_roi", "\"potion2\": self._select_val(\"char\", \"potion2\"), \"potion3\": self._select_val(\"char\", \"potion3\"), \"potion4\": self._select_val(\"char\", \"potion4\"), \"belt_rejuv_columns\":", "\"difficulty\"), \"custom_message_hook\": self._select_val(\"general\", \"custom_message_hook\"), \"discord_status_count\": False if not self._select_val(\"general\", \"discord_status_count\")", "self._custom: self.barbarian.update(self._custom[\"barbarian\"]) self.advanced_options = { \"pathing_delay_factor\": min(max(int(self._select_val(\"advanced_options\", \"pathing_delay_factor\")), 1), 10),", "\"casting_frames\": int(self._select_val(\"char\", \"casting_frames\")), \"atk_len_trav\": float(self._select_val(\"char\", \"atk_len_trav\")), \"atk_len_pindle\": float(self._select_val(\"char\", \"atk_len_pindle\")), \"atk_len_eldritch\":", "return self._config[section][key] elif section in self._pickit_config: return self._pickit_config[section][key] elif section", "key)) self.ui_roi = {} for key in self._game_config[\"ui_roi\"]: self.ui_roi[key] =", "= configparser.ConfigParser() self._shop_config.read('config/shop.ini') self._custom = configparser.ConfigParser() if os.environ.get('RUN_ENV') != \"test\"", "for key in self._game_config[\"ui_pos\"]: self.ui_pos[key] = int(self._select_val(\"ui_pos\", key)) self.ui_roi =", "\"loot_screenshots\"))), } # Added for dclone ip hunting self.dclone =", "self._print_warnings: print(f\"Warning: You activated {key} in pickit, but there is", "= filename.lower() if 
filename.endswith('.png'): item_name = filename[:-4] blacklist_item = item_name.startswith(\"bl__\")", "bool(int(self._select_val(\"general\", \"info_screenshots\"))), \"loot_screenshots\": bool(int(self._select_val(\"general\", \"loot_screenshots\"))), } # Added for dclone", "self._game_config.read('config/game.ini') self._pickit_config = configparser.ConfigParser() self._pickit_config.read('config/pickit.ini') self._shop_config = configparser.ConfigParser() self._shop_config.read('config/shop.ini') self._custom", "config.items and not blacklist_item: print(f\"Config not found for: \" +", "self._select_val(\"char\", \"show_belt\"), \"potion1\": self._select_val(\"char\", \"potion1\"), \"potion2\": self._select_val(\"char\", \"potion2\"), \"potion3\": self._select_val(\"char\",", "numpy as np import os class Config: def _select_val(self, section:", "\"monitor\": int(self._select_val(\"general\", \"monitor\")), \"max_game_length_s\": float(self._select_val(\"general\", \"max_game_length_s\")), \"exit_key\": self._select_val(\"general\", \"exit_key\"), \"resume_key\":", "\"merc_chicken\": float(self._select_val(\"char\", \"merc_chicken\")), \"tp\": self._select_val(\"char\", \"tp\"), \"belt_rows\": int(self._select_val(\"char\", \"belt_rows\")), \"show_belt\":", "\"hork_time_pindle\")), \"hork_time_eldritch\": float(self._select_val(\"char\", \"hork_time_eldritch\")), \"hork_time_shenk\": float(self._select_val(\"char\", \"hork_time_shenk\")), \"hork_time_council\": float(self._select_val(\"char\", \"hork_time_council\")),", "\"atk_len_pindle\": float(self._select_val(\"char\", \"atk_len_pindle\")), \"atk_len_eldritch\": float(self._select_val(\"char\", \"atk_len_eldritch\")), \"atk_len_shenk\": float(self._select_val(\"char\", \"atk_len_shenk\")), \"atk_len_nihlatak\":", "in self._game_config[\"path\"]: self.path[key] = np.reshape(np.array([int(x) for x in self._select_val(\"path\", key).split(\",\")]),", "x in self._select_val(\"colors\", key).split(\",\")]), 2) 
self.ui_pos = {} for key", "self._config = configparser.ConfigParser() self._config.read('config/params.ini') self._game_config = configparser.ConfigParser() self._game_config.read('config/game.ini') self._pickit_config =", "miss templates for k in config.items: if not os.path.exists(f\"./assets/items/{k}.png\"): print(f\"Template", "\"hork_time_eldritch\")), \"hork_time_shenk\": float(self._select_val(\"char\", \"hork_time_shenk\")), \"hork_time_council\": float(self._select_val(\"char\", \"hork_time_council\")), \"hork_time_nihlatak\": float(self._select_val(\"char\", \"hork_time_nihlatak\")),", "(-1, 2)) self.shop = { \"shop_trap_claws\": bool(int(self._select_val(\"claws\", \"shop_trap_claws\"))), \"shop_melee_claws\": bool(int(self._select_val(\"claws\",", "Check if any added items miss templates for k in", "section in self._shop_config: return self._shop_config[section][key] else: return self._game_config[section][key] def __init__(self,", "\"belt_hp_columns\": int(self._select_val(\"char\", \"belt_hp_columns\")), \"belt_mp_columns\": int(self._select_val(\"char\", \"belt_mp_columns\")), \"stash_gold\": bool(int(self._select_val(\"char\", \"stash_gold\"))), \"gold_trav_only\":", "\"pre_buff_every_run\"))), \"cta_available\": bool(int(self._select_val(\"char\", \"cta_available\"))), \"weapon_switch\": self._select_val(\"char\", \"weapon_switch\"), \"battle_orders\": self._select_val(\"char\", \"battle_orders\"),", "self._select_val(\"dclone\", \"dclone_hotip\"), } self.routes = {} for key in self._config[\"routes\"]:", "the effort # passing a single config instance through bites", "\"shop_trap_claws\": bool(int(self._select_val(\"claws\", \"shop_trap_claws\"))), \"shop_melee_claws\": bool(int(self._select_val(\"claws\", \"shop_melee_claws\"))), \"shop_3_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_3_skills_ias_gloves\"))), \"shop_2_skills_ias_gloves\":", "elif section in self._shop_config: return self._shop_config[section][key] else: 
return self._game_config[section][key] def", "\"message_body_template\"), \"message_highlight\": bool(int(self._select_val(\"advanced_options\", \"message_highlight\"))), } self.items = {} for key", "and key in self._custom[section]: return self._custom[section][key] elif section in self._config:", "ip hunting self.dclone = { \"region_ips\": self._select_val(\"dclone\", \"region_ips\"), \"dclone_hotip\": self._select_val(\"dclone\",", "\"atk_len_trav\")), \"atk_len_pindle\": float(self._select_val(\"char\", \"atk_len_pindle\")), \"atk_len_eldritch\": float(self._select_val(\"char\", \"atk_len_eldritch\")), \"atk_len_shenk\": float(self._select_val(\"char\", \"atk_len_shenk\")),", "self.ui_roi[key] = np.array([int(x) for x in self._select_val(\"ui_roi\", key).split(\",\")]) self.path =", "{} for key in self._config[\"routes\"]: self.routes[key] = bool(int(self._select_val(\"routes\", key))) self.char", "elif section in self._pickit_config: return self._pickit_config[section][key] elif section in self._shop_config:", "\"pre_buff_every_run\": bool(int(self._select_val(\"char\", \"pre_buff_every_run\"))), \"cta_available\": bool(int(self._select_val(\"char\", \"cta_available\"))), \"weapon_switch\": self._select_val(\"char\", \"weapon_switch\"), \"battle_orders\":", "\"type\": self._select_val(\"char\", \"type\"), \"show_items\": self._select_val(\"char\", \"show_items\"), \"inventory_screen\": self._select_val(\"char\", \"inventory_screen\"), \"stand_still\":", "\"potion4\": self._select_val(\"char\", \"potion4\"), \"belt_rejuv_columns\": int(self._select_val(\"char\", \"belt_rejuv_columns\")), \"belt_hp_columns\": int(self._select_val(\"char\", \"belt_hp_columns\")), \"belt_mp_columns\":", "2) self.ui_pos = {} for key in self._game_config[\"ui_pos\"]: self.ui_pos[key] =", "in self._game_config[\"ui_pos\"]: self.ui_pos[key] = int(self._select_val(\"ui_pos\", key)) self.ui_roi = {} for", "None): if section in self._custom and key in self._custom[section]: return", 
"self._game_config[\"ui_pos\"]: self.ui_pos[key] = int(self._select_val(\"ui_pos\", key)) self.ui_roi = {} for key", "not os.path.exists(f\"./assets/items/{k}.png\"): print(f\"Template not found: {k}\") # Check if any", "configparser.ConfigParser() self._game_config.read('config/game.ini') self._pickit_config = configparser.ConfigParser() self._pickit_config.read('config/pickit.ini') self._shop_config = configparser.ConfigParser() self._shop_config.read('config/shop.ini')", "self._select_val(\"advanced_options\", \"message_body_template\"), \"message_highlight\": bool(int(self._select_val(\"advanced_options\", \"message_highlight\"))), } self.items = {} for", "not self._select_val(\"general\", \"discord_status_count\") else int(self._select_val(\"general\", \"discord_status_count\")), \"info_screenshots\": bool(int(self._select_val(\"general\", \"info_screenshots\"))), \"loot_screenshots\":", "config instance through bites me in the ass self._print_warnings =", "self.items[key] and not os.path.exists(f\"./assets/items/{key}.png\") and self._print_warnings: print(f\"Warning: You activated {key}", "it is, not making the effort # passing a single", "self.advanced_options = { \"pathing_delay_factor\": min(max(int(self._select_val(\"advanced_options\", \"pathing_delay_factor\")), 1), 10), \"message_headers\": self._select_val(\"advanced_options\",", "and not blacklist_item: print(f\"Config not found for: \" + filename)", "\"randomize_runs\"))), \"difficulty\": self._select_val(\"general\", \"difficulty\"), \"custom_message_hook\": self._select_val(\"general\", \"custom_message_hook\"), \"discord_status_count\": False if", "= None): if section in self._custom and key in self._custom[section]:", "\"type\"), \"show_items\": self._select_val(\"char\", \"show_items\"), \"inventory_screen\": self._select_val(\"char\", \"inventory_screen\"), \"stand_still\": self._select_val(\"char\", \"stand_still\"),", "self._game_config[\"path\"]: self.path[key] = 
np.reshape(np.array([int(x) for x in self._select_val(\"path\", key).split(\",\")]), (-1,", "float(self._select_val(\"char\", \"atk_len_eldritch\")), \"atk_len_shenk\": float(self._select_val(\"char\", \"atk_len_shenk\")), \"atk_len_nihlatak\": float(self._select_val(\"char\", \"atk_len_nihlatak\")), \"hork_time_pindle\": float(self._select_val(\"char\",", "str = None): if section in self._custom and key in", "\"loot_screenshots\": bool(int(self._select_val(\"general\", \"loot_screenshots\"))), } # Added for dclone ip hunting", "\"pathing_delay_factor\": min(max(int(self._select_val(\"advanced_options\", \"pathing_delay_factor\")), 1), 10), \"message_headers\": self._select_val(\"advanced_options\", \"message_headers\"), \"message_body_template\": self._select_val(\"advanced_options\",", "Config(print_warnings=True) # Check if any added items miss templates for", "img available in assets/items\") self.colors = {} for key in", "\"hork_time_council\")), \"hork_time_nihlatak\": float(self._select_val(\"char\", \"hork_time_nihlatak\")), } self.sorceress = dict(self._config[\"sorceress\"]) if \"sorceress\"", "no img available in assets/items\") self.colors = {} for key", "= {} for key in self._game_config[\"colors\"]: self.colors[key] = np.split(np.array([int(x) for", "self._select_val(\"char\", \"potion4\"), \"belt_rejuv_columns\": int(self._select_val(\"char\", \"belt_rejuv_columns\")), \"belt_hp_columns\": int(self._select_val(\"char\", \"belt_hp_columns\")), \"belt_mp_columns\": int(self._select_val(\"char\",", "self._select_val(\"char\", \"inventory_screen\"), \"stand_still\": self._select_val(\"char\", \"stand_still\"), \"force_move\": self._select_val(\"char\", \"force_move\"), \"num_loot_columns\": int(self._select_val(\"char\",", "{} for key in self._pickit_config[\"items\"]: self.items[key] = int(self._select_val(\"items\", key)) if", "pickit, but there is no img available in assets/items\") self.colors", "config = Config(print_warnings=True) # Check if any 
added items miss", "added items miss templates for k in config.items: if not", "self._select_val(\"general\", \"resume_key\"), \"auto_settings_key\": self._select_val(\"general\", \"auto_settings_key\"), \"graphic_debugger_key\": self._select_val(\"general\", \"graphic_debugger_key\"), \"logg_lvl\": self._select_val(\"general\",", "\"pathing_delay_factor\")), 1), 10), \"message_headers\": self._select_val(\"advanced_options\", \"message_headers\"), \"message_body_template\": self._select_val(\"advanced_options\", \"message_body_template\"), \"message_highlight\":", "\"shop_2_skills_ias_gloves\"))), \"trap_min_score\": int(self._select_val(\"claws\", \"trap_min_score\")), \"melee_min_score\": int(self._select_val(\"claws\", \"melee_min_score\")), } if __name__", "and os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini') self.general = { \"saved_games_folder\": self._select_val(\"general\", \"saved_games_folder\"), \"name\":", "not in config.items and not blacklist_item: print(f\"Config not found for:", "but there is no img available in assets/items\") self.colors =", "item_name not in config.items and not blacklist_item: print(f\"Config not found", "= { \"shop_trap_claws\": bool(int(self._select_val(\"claws\", \"shop_trap_claws\"))), \"shop_melee_claws\": bool(int(self._select_val(\"claws\", \"shop_melee_claws\"))), \"shop_3_skills_ias_gloves\": bool(int(self._select_val(\"gloves\",", "\"shop_melee_claws\": bool(int(self._select_val(\"claws\", \"shop_melee_claws\"))), \"shop_3_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_3_skills_ias_gloves\"))), \"shop_2_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_2_skills_ias_gloves\"))), \"trap_min_score\":", "= {} for key in self._game_config[\"path\"]: self.path[key] = np.reshape(np.array([int(x) for", "__name__ == \"__main__\": config = Config(print_warnings=True) # Check if any", "= configparser.ConfigParser() self._game_config.read('config/game.ini') 
self._pickit_config = configparser.ConfigParser() self._pickit_config.read('config/pickit.ini') self._shop_config = configparser.ConfigParser()", "self.path = {} for key in self._game_config[\"path\"]: self.path[key] = np.reshape(np.array([int(x)", "\"dclone_hotip\": self._select_val(\"dclone\", \"dclone_hotip\"), } self.routes = {} for key in", "\"atk_len_pindle\")), \"atk_len_eldritch\": float(self._select_val(\"char\", \"atk_len_eldritch\")), \"atk_len_shenk\": float(self._select_val(\"char\", \"atk_len_shenk\")), \"atk_len_nihlatak\": float(self._select_val(\"char\", \"atk_len_nihlatak\")),", "k in config.items: if not os.path.exists(f\"./assets/items/{k}.png\"): print(f\"Template not found: {k}\")", "= configparser.ConfigParser() if os.environ.get('RUN_ENV') != \"test\" and os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini') self.general", "self._select_val(\"general\", \"auto_settings_key\"), \"graphic_debugger_key\": self._select_val(\"general\", \"graphic_debugger_key\"), \"logg_lvl\": self._select_val(\"general\", \"logg_lvl\"), \"randomize_runs\": bool(int(self._select_val(\"general\",", "\"atk_len_shenk\": float(self._select_val(\"char\", \"atk_len_shenk\")), \"atk_len_nihlatak\": float(self._select_val(\"char\", \"atk_len_nihlatak\")), \"hork_time_pindle\": float(self._select_val(\"char\", \"hork_time_pindle\")), \"hork_time_eldritch\":", "\"trap_min_score\")), \"melee_min_score\": int(self._select_val(\"claws\", \"melee_min_score\")), } if __name__ == \"__main__\": config", "} if __name__ == \"__main__\": config = Config(print_warnings=True) # Check", "a hack... 
here it is, not making the effort #", "key in self._pickit_config[\"items\"]: self.items[key] = int(self._select_val(\"items\", key)) if self.items[key] and", "os.listdir(f'assets/items'): filename = filename.lower() if filename.endswith('.png'): item_name = filename[:-4] blacklist_item", "key in self._game_config[\"ui_roi\"]: self.ui_roi[key] = np.array([int(x) for x in self._select_val(\"ui_roi\",", "\"sorceress\" in self._custom: self.sorceress.update(dict(self._custom[\"sorceress\"])) self.hammerdin = self._config[\"hammerdin\"] if \"hammerdin\" in", "self._select_val(\"path\", key).split(\",\")]), (-1, 2)) self.shop = { \"shop_trap_claws\": bool(int(self._select_val(\"claws\", \"shop_trap_claws\"))),", "x in self._select_val(\"ui_roi\", key).split(\",\")]) self.path = {} for key in", "str, key: str = None): if section in self._custom and", "return self._custom[section][key] elif section in self._config: return self._config[section][key] elif section", "= np.reshape(np.array([int(x) for x in self._select_val(\"path\", key).split(\",\")]), (-1, 2)) self.shop", "self._pickit_config: return self._pickit_config[section][key] elif section in self._shop_config: return self._shop_config[section][key] else:", "return self._shop_config[section][key] else: return self._game_config[section][key] def __init__(self, print_warnings: bool =", "_select_val(self, section: str, key: str = None): if section in", "\"max_game_length_s\")), \"exit_key\": self._select_val(\"general\", \"exit_key\"), \"resume_key\": self._select_val(\"general\", \"resume_key\"), \"auto_settings_key\": self._select_val(\"general\", \"auto_settings_key\"),", "\"show_items\"), \"inventory_screen\": self._select_val(\"char\", \"inventory_screen\"), \"stand_still\": self._select_val(\"char\", \"stand_still\"), \"force_move\": self._select_val(\"char\", \"force_move\"),", "1), 10), \"message_headers\": self._select_val(\"advanced_options\", \"message_headers\"), \"message_body_template\": 
self._select_val(\"advanced_options\", \"message_body_template\"), \"message_highlight\": bool(int(self._select_val(\"advanced_options\",", "} self.items = {} for key in self._pickit_config[\"items\"]: self.items[key] =", "in self._select_val(\"ui_roi\", key).split(\",\")]) self.path = {} for key in self._game_config[\"path\"]:", "self._select_val(\"advanced_options\", \"message_headers\"), \"message_body_template\": self._select_val(\"advanced_options\", \"message_body_template\"), \"message_highlight\": bool(int(self._select_val(\"advanced_options\", \"message_highlight\"))), } self.items", "\"message_headers\": self._select_val(\"advanced_options\", \"message_headers\"), \"message_body_template\": self._select_val(\"advanced_options\", \"message_body_template\"), \"message_highlight\": bool(int(self._select_val(\"advanced_options\", \"message_highlight\"))), }", "print(f\"Warning: You activated {key} in pickit, but there is no", "\"logg_lvl\": self._select_val(\"general\", \"logg_lvl\"), \"randomize_runs\": bool(int(self._select_val(\"general\", \"randomize_runs\"))), \"difficulty\": self._select_val(\"general\", \"difficulty\"), \"custom_message_hook\":", "\"hork_time_pindle\": float(self._select_val(\"char\", \"hork_time_pindle\")), \"hork_time_eldritch\": float(self._select_val(\"char\", \"hork_time_eldritch\")), \"hork_time_shenk\": float(self._select_val(\"char\", \"hork_time_shenk\")), \"hork_time_council\":", "int(self._select_val(\"char\", \"belt_rejuv_columns\")), \"belt_hp_columns\": int(self._select_val(\"char\", \"belt_hp_columns\")), \"belt_mp_columns\": int(self._select_val(\"char\", \"belt_mp_columns\")), \"stash_gold\": bool(int(self._select_val(\"char\",", "bool(int(self._select_val(\"claws\", \"shop_trap_claws\"))), \"shop_melee_claws\": bool(int(self._select_val(\"claws\", \"shop_melee_claws\"))), \"shop_3_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_3_skills_ias_gloves\"))), \"shop_2_skills_ias_gloves\": 
bool(int(self._select_val(\"gloves\",", "not making the effort # passing a single config instance", "{k}\") # Check if any item templates miss a config", "section in self._config: return self._config[section][key] elif section in self._pickit_config: return", "self._pickit_config.read('config/pickit.ini') self._shop_config = configparser.ConfigParser() self._shop_config.read('config/shop.ini') self._custom = configparser.ConfigParser() if os.environ.get('RUN_ENV')", "!= \"test\" and os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini') self.general = { \"saved_games_folder\": self._select_val(\"general\",", "{} for key in self._game_config[\"colors\"]: self.colors[key] = np.split(np.array([int(x) for x", "\"potion1\": self._select_val(\"char\", \"potion1\"), \"potion2\": self._select_val(\"char\", \"potion2\"), \"potion3\": self._select_val(\"char\", \"potion3\"), \"potion4\":", "self.ui_pos[key] = int(self._select_val(\"ui_pos\", key)) self.ui_roi = {} for key in", "self._select_val(\"ui_roi\", key).split(\",\")]) self.path = {} for key in self._game_config[\"path\"]: self.path[key]", "item templates miss a config for filename in os.listdir(f'assets/items'): filename", "key: str = None): if section in self._custom and key", "Config: def _select_val(self, section: str, key: str = None): if", "\"trapsin\" in self._custom: self.trapsin.update(self._custom[\"trapsin\"]) self.barbarian = self._config[\"barbarian\"] if \"barbarian\" in", "section in self._custom and key in self._custom[section]: return self._custom[section][key] elif", "self.ui_pos = {} for key in self._game_config[\"ui_pos\"]: self.ui_pos[key] = int(self._select_val(\"ui_pos\",", "key).split(\",\")]), (-1, 2)) self.shop = { \"shop_trap_claws\": bool(int(self._select_val(\"claws\", \"shop_trap_claws\"))), \"shop_melee_claws\":", "\"num_loot_columns\": int(self._select_val(\"char\", \"num_loot_columns\")), \"take_health_potion\": float(self._select_val(\"char\", 
\"take_health_potion\")), \"take_mana_potion\": float(self._select_val(\"char\", \"take_mana_potion\")), \"take_rejuv_potion_health\":", "np import os class Config: def _select_val(self, section: str, key:", "self._custom = configparser.ConfigParser() if os.environ.get('RUN_ENV') != \"test\" and os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini')", "\"belt_rejuv_columns\")), \"belt_hp_columns\": int(self._select_val(\"char\", \"belt_hp_columns\")), \"belt_mp_columns\": int(self._select_val(\"char\", \"belt_mp_columns\")), \"stash_gold\": bool(int(self._select_val(\"char\", \"stash_gold\"))),", "self.trapsin.update(self._custom[\"trapsin\"]) self.barbarian = self._config[\"barbarian\"] if \"barbarian\" in self._custom: self.barbarian.update(self._custom[\"barbarian\"]) self.advanced_options", "\"resume_key\"), \"auto_settings_key\": self._select_val(\"general\", \"auto_settings_key\"), \"graphic_debugger_key\": self._select_val(\"general\", \"graphic_debugger_key\"), \"logg_lvl\": self._select_val(\"general\", \"logg_lvl\"),", "\"potion3\"), \"potion4\": self._select_val(\"char\", \"potion4\"), \"belt_rejuv_columns\": int(self._select_val(\"char\", \"belt_rejuv_columns\")), \"belt_hp_columns\": int(self._select_val(\"char\", \"belt_hp_columns\")),", "\"use_merc\": bool(int(self._select_val(\"char\", \"use_merc\"))), \"pre_buff_every_run\": bool(int(self._select_val(\"char\", \"pre_buff_every_run\"))), \"cta_available\": bool(int(self._select_val(\"char\", \"cta_available\"))), \"weapon_switch\":", "else int(self._select_val(\"general\", \"discord_status_count\")), \"info_screenshots\": bool(int(self._select_val(\"general\", \"info_screenshots\"))), \"loot_screenshots\": bool(int(self._select_val(\"general\", \"loot_screenshots\"))), }", "import configparser import numpy as np import os class Config:", "the ass self._print_warnings = print_warnings self._config = configparser.ConfigParser() self._config.read('config/params.ini') 
self._game_config", "\"discord_status_count\": False if not self._select_val(\"general\", \"discord_status_count\") else int(self._select_val(\"general\", \"discord_status_count\")), \"info_screenshots\":", "in self._pickit_config: return self._pickit_config[section][key] elif section in self._shop_config: return self._shop_config[section][key]", "\"exit_key\"), \"resume_key\": self._select_val(\"general\", \"resume_key\"), \"auto_settings_key\": self._select_val(\"general\", \"auto_settings_key\"), \"graphic_debugger_key\": self._select_val(\"general\", \"graphic_debugger_key\"),", "\"region_ips\": self._select_val(\"dclone\", \"region_ips\"), \"dclone_hotip\": self._select_val(\"dclone\", \"dclone_hotip\"), } self.routes = {}", "{ \"pathing_delay_factor\": min(max(int(self._select_val(\"advanced_options\", \"pathing_delay_factor\")), 1), 10), \"message_headers\": self._select_val(\"advanced_options\", \"message_headers\"), \"message_body_template\":", "key in self._game_config[\"path\"]: self.path[key] = np.reshape(np.array([int(x) for x in self._select_val(\"path\",", "\"weapon_switch\"), \"battle_orders\": self._select_val(\"char\", \"battle_orders\"), \"battle_command\": self._select_val(\"char\", \"battle_command\"), \"casting_frames\": int(self._select_val(\"char\", \"casting_frames\")),", "self._select_val(\"general\", \"logg_lvl\"), \"randomize_runs\": bool(int(self._select_val(\"general\", \"randomize_runs\"))), \"difficulty\": self._select_val(\"general\", \"difficulty\"), \"custom_message_hook\": self._select_val(\"general\",", "\"message_headers\"), \"message_body_template\": self._select_val(\"advanced_options\", \"message_body_template\"), \"message_highlight\": bool(int(self._select_val(\"advanced_options\", \"message_highlight\"))), } self.items =", "\"exit_key\": self._select_val(\"general\", \"exit_key\"), \"resume_key\": self._select_val(\"general\", \"resume_key\"), \"auto_settings_key\": self._select_val(\"general\", \"auto_settings_key\"), 
\"graphic_debugger_key\":", "\"custom_message_hook\": self._select_val(\"general\", \"custom_message_hook\"), \"discord_status_count\": False if not self._select_val(\"general\", \"discord_status_count\") else", "2)) self.shop = { \"shop_trap_claws\": bool(int(self._select_val(\"claws\", \"shop_trap_claws\"))), \"shop_melee_claws\": bool(int(self._select_val(\"claws\", \"shop_melee_claws\"))),", "\"shop_3_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_3_skills_ias_gloves\"))), \"shop_2_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_2_skills_ias_gloves\"))), \"trap_min_score\": int(self._select_val(\"claws\", \"trap_min_score\")), \"melee_min_score\":", "config.items: if not os.path.exists(f\"./assets/items/{k}.png\"): print(f\"Template not found: {k}\") # Check", "if __name__ == \"__main__\": config = Config(print_warnings=True) # Check if", "for x in self._select_val(\"colors\", key).split(\",\")]), 2) self.ui_pos = {} for", "return self._game_config[section][key] def __init__(self, print_warnings: bool = False): # print_warnings,", "\"take_rejuv_potion_health\")), \"take_rejuv_potion_mana\": float(self._select_val(\"char\", \"take_rejuv_potion_mana\")), \"heal_merc\": float(self._select_val(\"char\", \"heal_merc\")), \"heal_rejuv_merc\": float(self._select_val(\"char\", \"heal_rejuv_merc\")),", "= filename[:-4] blacklist_item = item_name.startswith(\"bl__\") if item_name not in config.items", "{} for key in self._game_config[\"ui_roi\"]: self.ui_roi[key] = np.array([int(x) for x", "if any added items miss templates for k in config.items:", "filename in os.listdir(f'assets/items'): filename = filename.lower() if filename.endswith('.png'): item_name =", "is no img available in assets/items\") self.colors = {} for", "\"atk_len_eldritch\")), \"atk_len_shenk\": float(self._select_val(\"char\", \"atk_len_shenk\")), \"atk_len_nihlatak\": float(self._select_val(\"char\", \"atk_len_nihlatak\")), \"hork_time_pindle\": 
float(self._select_val(\"char\", \"hork_time_pindle\")),", "if \"hammerdin\" in self._custom: self.hammerdin.update(self._custom[\"hammerdin\"]) self.trapsin = self._config[\"trapsin\"] if \"trapsin\"", "templates for k in config.items: if not os.path.exists(f\"./assets/items/{k}.png\"): print(f\"Template not", "self.routes[key] = bool(int(self._select_val(\"routes\", key))) self.char = { \"type\": self._select_val(\"char\", \"type\"),", "= {} for key in self._pickit_config[\"items\"]: self.items[key] = int(self._select_val(\"items\", key))", "in self._select_val(\"colors\", key).split(\",\")]), 2) self.ui_pos = {} for key in", "} self.routes = {} for key in self._config[\"routes\"]: self.routes[key] =", "np.array([int(x) for x in self._select_val(\"ui_roi\", key).split(\",\")]) self.path = {} for", "self.barbarian = self._config[\"barbarian\"] if \"barbarian\" in self._custom: self.barbarian.update(self._custom[\"barbarian\"]) self.advanced_options =", "not os.path.exists(f\"./assets/items/{key}.png\") and self._print_warnings: print(f\"Warning: You activated {key} in pickit,", "filename = filename.lower() if filename.endswith('.png'): item_name = filename[:-4] blacklist_item =", "self._custom and key in self._custom[section]: return self._custom[section][key] elif section in", "\"take_mana_potion\": float(self._select_val(\"char\", \"take_mana_potion\")), \"take_rejuv_potion_health\": float(self._select_val(\"char\", \"take_rejuv_potion_health\")), \"take_rejuv_potion_mana\": float(self._select_val(\"char\", \"take_rejuv_potion_mana\")), \"heal_merc\":", "key).split(\",\")]) self.path = {} for key in self._game_config[\"path\"]: self.path[key] =", "here it is, not making the effort # passing a", "self.sorceress = dict(self._config[\"sorceress\"]) if \"sorceress\" in self._custom: self.sorceress.update(dict(self._custom[\"sorceress\"])) self.hammerdin =", "\"custom_message_hook\"), \"discord_status_count\": False if not self._select_val(\"general\", 
\"discord_status_count\") else int(self._select_val(\"general\", \"discord_status_count\")),", "\"tp\"), \"belt_rows\": int(self._select_val(\"char\", \"belt_rows\")), \"show_belt\": self._select_val(\"char\", \"show_belt\"), \"potion1\": self._select_val(\"char\", \"potion1\"),", "for key in self._game_config[\"ui_roi\"]: self.ui_roi[key] = np.array([int(x) for x in", "key in self._game_config[\"colors\"]: self.colors[key] = np.split(np.array([int(x) for x in self._select_val(\"colors\",", "float(self._select_val(\"char\", \"take_rejuv_potion_mana\")), \"heal_merc\": float(self._select_val(\"char\", \"heal_merc\")), \"heal_rejuv_merc\": float(self._select_val(\"char\", \"heal_rejuv_merc\")), \"chicken\": float(self._select_val(\"char\",", "float(self._select_val(\"char\", \"atk_len_pindle\")), \"atk_len_eldritch\": float(self._select_val(\"char\", \"atk_len_eldritch\")), \"atk_len_shenk\": float(self._select_val(\"char\", \"atk_len_shenk\")), \"atk_len_nihlatak\": float(self._select_val(\"char\",", "bool(int(self._select_val(\"char\", \"gold_trav_only\"))), \"use_merc\": bool(int(self._select_val(\"char\", \"use_merc\"))), \"pre_buff_every_run\": bool(int(self._select_val(\"char\", \"pre_buff_every_run\"))), \"cta_available\": bool(int(self._select_val(\"char\",", "\"show_belt\": self._select_val(\"char\", \"show_belt\"), \"potion1\": self._select_val(\"char\", \"potion1\"), \"potion2\": self._select_val(\"char\", \"potion2\"), \"potion3\":", "if filename.endswith('.png'): item_name = filename[:-4] blacklist_item = item_name.startswith(\"bl__\") if item_name", "\"message_highlight\"))), } self.items = {} for key in self._pickit_config[\"items\"]: self.items[key]", "} # Added for dclone ip hunting self.dclone = {", "} self.sorceress = dict(self._config[\"sorceress\"]) if \"sorceress\" in self._custom: self.sorceress.update(dict(self._custom[\"sorceress\"])) self.hammerdin", "\"casting_frames\")), \"atk_len_trav\": float(self._select_val(\"char\", 
\"atk_len_trav\")), \"atk_len_pindle\": float(self._select_val(\"char\", \"atk_len_pindle\")), \"atk_len_eldritch\": float(self._select_val(\"char\", \"atk_len_eldritch\")),", "\"atk_len_nihlatak\": float(self._select_val(\"char\", \"atk_len_nihlatak\")), \"hork_time_pindle\": float(self._select_val(\"char\", \"hork_time_pindle\")), \"hork_time_eldritch\": float(self._select_val(\"char\", \"hork_time_eldritch\")), \"hork_time_shenk\":", "in self._pickit_config[\"items\"]: self.items[key] = int(self._select_val(\"items\", key)) if self.items[key] and not", "\"randomize_runs\": bool(int(self._select_val(\"general\", \"randomize_runs\"))), \"difficulty\": self._select_val(\"general\", \"difficulty\"), \"custom_message_hook\": self._select_val(\"general\", \"custom_message_hook\"), \"discord_status_count\":", "os class Config: def _select_val(self, section: str, key: str =", "float(self._select_val(\"char\", \"atk_len_trav\")), \"atk_len_pindle\": float(self._select_val(\"char\", \"atk_len_pindle\")), \"atk_len_eldritch\": float(self._select_val(\"char\", \"atk_len_eldritch\")), \"atk_len_shenk\": float(self._select_val(\"char\",", "self._shop_config: return self._shop_config[section][key] else: return self._game_config[section][key] def __init__(self, print_warnings: bool", "\"hork_time_nihlatak\")), } self.sorceress = dict(self._config[\"sorceress\"]) if \"sorceress\" in self._custom: self.sorceress.update(dict(self._custom[\"sorceress\"]))", "self._custom[section][key] elif section in self._config: return self._config[section][key] elif section in", "\"chicken\": float(self._select_val(\"char\", \"chicken\")), \"merc_chicken\": float(self._select_val(\"char\", \"merc_chicken\")), \"tp\": self._select_val(\"char\", \"tp\"), \"belt_rows\":", "assets/items\") self.colors = {} for key in self._game_config[\"colors\"]: self.colors[key] =", "\"potion4\"), \"belt_rejuv_columns\": int(self._select_val(\"char\", \"belt_rejuv_columns\")), \"belt_hp_columns\": 
int(self._select_val(\"char\", \"belt_hp_columns\")), \"belt_mp_columns\": int(self._select_val(\"char\", \"belt_mp_columns\")),", "self._config.read('config/params.ini') self._game_config = configparser.ConfigParser() self._game_config.read('config/game.ini') self._pickit_config = configparser.ConfigParser() self._pickit_config.read('config/pickit.ini') self._shop_config", "\"hork_time_nihlatak\": float(self._select_val(\"char\", \"hork_time_nihlatak\")), } self.sorceress = dict(self._config[\"sorceress\"]) if \"sorceress\" in", "\"name\"), \"monitor\": int(self._select_val(\"general\", \"monitor\")), \"max_game_length_s\": float(self._select_val(\"general\", \"max_game_length_s\")), \"exit_key\": self._select_val(\"general\", \"exit_key\"),", "= { \"type\": self._select_val(\"char\", \"type\"), \"show_items\": self._select_val(\"char\", \"show_items\"), \"inventory_screen\": self._select_val(\"char\",", "bool(int(self._select_val(\"advanced_options\", \"message_highlight\"))), } self.items = {} for key in self._pickit_config[\"items\"]:", "\"barbarian\" in self._custom: self.barbarian.update(self._custom[\"barbarian\"]) self.advanced_options = { \"pathing_delay_factor\": min(max(int(self._select_val(\"advanced_options\", \"pathing_delay_factor\")),", "\"atk_len_eldritch\": float(self._select_val(\"char\", \"atk_len_eldritch\")), \"atk_len_shenk\": float(self._select_val(\"char\", \"atk_len_shenk\")), \"atk_len_nihlatak\": float(self._select_val(\"char\", \"atk_len_nihlatak\")), \"hork_time_pindle\":", "= print_warnings self._config = configparser.ConfigParser() self._config.read('config/params.ini') self._game_config = configparser.ConfigParser() self._game_config.read('config/game.ini')", "self.shop = { \"shop_trap_claws\": bool(int(self._select_val(\"claws\", \"shop_trap_claws\"))), \"shop_melee_claws\": bool(int(self._select_val(\"claws\", \"shop_melee_claws\"))), \"shop_3_skills_ias_gloves\":", "\"show_belt\"), \"potion1\": self._select_val(\"char\", 
\"potion1\"), \"potion2\": self._select_val(\"char\", \"potion2\"), \"potion3\": self._select_val(\"char\", \"potion3\"),", "\"stash_gold\"))), \"gold_trav_only\": bool(int(self._select_val(\"char\", \"gold_trav_only\"))), \"use_merc\": bool(int(self._select_val(\"char\", \"use_merc\"))), \"pre_buff_every_run\": bool(int(self._select_val(\"char\", \"pre_buff_every_run\"))),", "bool(int(self._select_val(\"char\", \"cta_available\"))), \"weapon_switch\": self._select_val(\"char\", \"weapon_switch\"), \"battle_orders\": self._select_val(\"char\", \"battle_orders\"), \"battle_command\": self._select_val(\"char\",", "= {} for key in self._game_config[\"ui_roi\"]: self.ui_roi[key] = np.array([int(x) for", "self._select_val(\"char\", \"potion1\"), \"potion2\": self._select_val(\"char\", \"potion2\"), \"potion3\": self._select_val(\"char\", \"potion3\"), \"potion4\": self._select_val(\"char\",", "float(self._select_val(\"char\", \"hork_time_nihlatak\")), } self.sorceress = dict(self._config[\"sorceress\"]) if \"sorceress\" in self._custom:", "self._custom[section]: return self._custom[section][key] elif section in self._config: return self._config[section][key] elif", "= item_name.startswith(\"bl__\") if item_name not in config.items and not blacklist_item:", "in self._custom: self.hammerdin.update(self._custom[\"hammerdin\"]) self.trapsin = self._config[\"trapsin\"] if \"trapsin\" in self._custom:", "self.trapsin = self._config[\"trapsin\"] if \"trapsin\" in self._custom: self.trapsin.update(self._custom[\"trapsin\"]) self.barbarian =", "bites me in the ass self._print_warnings = print_warnings self._config =", "\"take_health_potion\": float(self._select_val(\"char\", \"take_health_potion\")), \"take_mana_potion\": float(self._select_val(\"char\", \"take_mana_potion\")), \"take_rejuv_potion_health\": float(self._select_val(\"char\", \"take_rejuv_potion_health\")), \"take_rejuv_potion_mana\":", "self._pickit_config = configparser.ConfigParser() 
self._pickit_config.read('config/pickit.ini') self._shop_config = configparser.ConfigParser() self._shop_config.read('config/shop.ini') self._custom =", "\"graphic_debugger_key\"), \"logg_lvl\": self._select_val(\"general\", \"logg_lvl\"), \"randomize_runs\": bool(int(self._select_val(\"general\", \"randomize_runs\"))), \"difficulty\": self._select_val(\"general\", \"difficulty\"),", "os.environ.get('RUN_ENV') != \"test\" and os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini') self.general = { \"saved_games_folder\":", "for key in self._pickit_config[\"items\"]: self.items[key] = int(self._select_val(\"items\", key)) if self.items[key]", "as np import os class Config: def _select_val(self, section: str,", "\"force_move\": self._select_val(\"char\", \"force_move\"), \"num_loot_columns\": int(self._select_val(\"char\", \"num_loot_columns\")), \"take_health_potion\": float(self._select_val(\"char\", \"take_health_potion\")), \"take_mana_potion\":", "self._select_val(\"general\", \"difficulty\"), \"custom_message_hook\": self._select_val(\"general\", \"custom_message_hook\"), \"discord_status_count\": False if not self._select_val(\"general\",", "for dclone ip hunting self.dclone = { \"region_ips\": self._select_val(\"dclone\", \"region_ips\"),", "int(self._select_val(\"char\", \"belt_rows\")), \"show_belt\": self._select_val(\"char\", \"show_belt\"), \"potion1\": self._select_val(\"char\", \"potion1\"), \"potion2\": self._select_val(\"char\",", "\"tp\": self._select_val(\"char\", \"tp\"), \"belt_rows\": int(self._select_val(\"char\", \"belt_rows\")), \"show_belt\": self._select_val(\"char\", \"show_belt\"), \"potion1\":", "False if not self._select_val(\"general\", \"discord_status_count\") else int(self._select_val(\"general\", \"discord_status_count\")), \"info_screenshots\": bool(int(self._select_val(\"general\",", "int(self._select_val(\"char\", \"belt_hp_columns\")), \"belt_mp_columns\": int(self._select_val(\"char\", 
\"belt_mp_columns\")), \"stash_gold\": bool(int(self._select_val(\"char\", \"stash_gold\"))), \"gold_trav_only\": bool(int(self._select_val(\"char\",", "key in self._custom[section]: return self._custom[section][key] elif section in self._config: return", "in self._config: return self._config[section][key] elif section in self._pickit_config: return self._pickit_config[section][key]", "for key in self._game_config[\"colors\"]: self.colors[key] = np.split(np.array([int(x) for x in", "self._select_val(\"general\", \"name\"), \"monitor\": int(self._select_val(\"general\", \"monitor\")), \"max_game_length_s\": float(self._select_val(\"general\", \"max_game_length_s\")), \"exit_key\": self._select_val(\"general\",", "self._select_val(\"char\", \"potion2\"), \"potion3\": self._select_val(\"char\", \"potion3\"), \"potion4\": self._select_val(\"char\", \"potion4\"), \"belt_rejuv_columns\": int(self._select_val(\"char\",", "\"name\": self._select_val(\"general\", \"name\"), \"monitor\": int(self._select_val(\"general\", \"monitor\")), \"max_game_length_s\": float(self._select_val(\"general\", \"max_game_length_s\")), \"exit_key\":", "self.items = {} for key in self._pickit_config[\"items\"]: self.items[key] = int(self._select_val(\"items\",", "def _select_val(self, section: str, key: str = None): if section", "self._game_config[section][key] def __init__(self, print_warnings: bool = False): # print_warnings, what", "{ \"shop_trap_claws\": bool(int(self._select_val(\"claws\", \"shop_trap_claws\"))), \"shop_melee_claws\": bool(int(self._select_val(\"claws\", \"shop_melee_claws\"))), \"shop_3_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_3_skills_ias_gloves\"))),", "effort # passing a single config instance through bites me", "config for filename in os.listdir(f'assets/items'): filename = filename.lower() if filename.endswith('.png'):", "self._print_warnings = print_warnings self._config = configparser.ConfigParser() self._config.read('config/params.ini') 
self._game_config = configparser.ConfigParser()", "if os.environ.get('RUN_ENV') != \"test\" and os.path.exists('config/custom.ini'): self._custom.read('config/custom.ini') self.general = {", "\"graphic_debugger_key\": self._select_val(\"general\", \"graphic_debugger_key\"), \"logg_lvl\": self._select_val(\"general\", \"logg_lvl\"), \"randomize_runs\": bool(int(self._select_val(\"general\", \"randomize_runs\"))), \"difficulty\":", "dclone ip hunting self.dclone = { \"region_ips\": self._select_val(\"dclone\", \"region_ips\"), \"dclone_hotip\":", "bool(int(self._select_val(\"gloves\", \"shop_3_skills_ias_gloves\"))), \"shop_2_skills_ias_gloves\": bool(int(self._select_val(\"gloves\", \"shop_2_skills_ias_gloves\"))), \"trap_min_score\": int(self._select_val(\"claws\", \"trap_min_score\")), \"melee_min_score\": int(self._select_val(\"claws\",", "bool(int(self._select_val(\"char\", \"use_merc\"))), \"pre_buff_every_run\": bool(int(self._select_val(\"char\", \"pre_buff_every_run\"))), \"cta_available\": bool(int(self._select_val(\"char\", \"cta_available\"))), \"weapon_switch\": self._select_val(\"char\",", "\"take_health_potion\")), \"take_mana_potion\": float(self._select_val(\"char\", \"take_mana_potion\")), \"take_rejuv_potion_health\": float(self._select_val(\"char\", \"take_rejuv_potion_health\")), \"take_rejuv_potion_mana\": float(self._select_val(\"char\", \"take_rejuv_potion_mana\")),", "\"battle_orders\"), \"battle_command\": self._select_val(\"char\", \"battle_command\"), \"casting_frames\": int(self._select_val(\"char\", \"casting_frames\")), \"atk_len_trav\": float(self._select_val(\"char\", \"atk_len_trav\")),", "= configparser.ConfigParser() self._pickit_config.read('config/pickit.ini') self._shop_config = configparser.ConfigParser() self._shop_config.read('config/shop.ini') self._custom = configparser.ConfigParser()", "class Config: def _select_val(self, section: str, key: str = None):", "= self._config[\"trapsin\"] if \"trapsin\" in self._custom: 
self.trapsin.update(self._custom[\"trapsin\"]) self.barbarian = self._config[\"barbarian\"]", "\"message_body_template\": self._select_val(\"advanced_options\", \"message_body_template\"), \"message_highlight\": bool(int(self._select_val(\"advanced_options\", \"message_highlight\"))), } self.items = {}", "self.items[key] = int(self._select_val(\"items\", key)) if self.items[key] and not os.path.exists(f\"./assets/items/{key}.png\") and" ]
[ "{ \"sqrthann\": sqrthann, \"hann\": th.hann_window, \"hamm\": th.hamming_window, \"blackman\": th.blackman_window, \"bartlett\":", "frame_hop: frame hop size in number samples onesided: return half", "\"\"\" return _forward_stft(wav, self.K, output=output, frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis, onesided=self.onesided, center=self.center) class", "make K^H * K = I S = B**0.5 else:", "FFT size pre_emphasis: factor of preemphasis normalized: use normalized DFT", "True, num_bins: Optional[int] = None, sr: int = 16000, num_mels:", "= init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=True, mode=mode) return", "\"complex\": return (real, imag) elif output == \"real\": return th.stack([real,", "imag], dim=1) # N x 1 x T s =", "true, choose round(#power_of_two) as the FFT size normalized: return normalized", "Tensor frame_hop: frame hop size in number samples pre_emphasis: factor", "output format: {output}\") if wav_dim not in [2, 3]: raise", "self.frame_len): raise RuntimeError( f\"Audio samples less than frame_len ({self.frame_len})\") kernel_size", "in range(-lctx, rctx + 1): idx = th.arange(c, c +", "Tensor]), STFT transform results \"\"\" wav_dim = wav.dim() if output", "Hz) norm: normalize the mel filter coefficients \"\"\" # FFT", "-> th.Tensor: \"\"\" Return mel filter coefficients Args: frame_len: length", "F x D splice = th.stack(ctx, -1) return splice def", "K = th.fft(I / S, 1) if mode == \"kaldi\":", "= np.sinc( times * zeros_per_block) * window * zeros_per_block /", "str = \"polar\", frame_hop: int = 256, onesided: bool =", "librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT size", "T % subsampling_factor for c in range(-lctx, rctx + 1):", "None, None] / float(dst_sr) - np.arange(src_sr)[None, :, None] / float(src_sr)", "0][None, ...], frames) else: packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0)", "hop size between 
frames window: window name round_pow_of_two: if true,", "# make K^H * K = I S = B**0.5", "idx)) if op == \"cat\": # N x ... x", "% subsampling_factor for c in range(-lctx, rctx + 1): idx", "**kwargs) def forward( self, wav: th.Tensor, output: str = \"polar\"", "use normalized DFT kernel onesided: output onesided STFT inverse: using", "+ f\"pre_emphasis={self.pre_emphasis}, normalized={normalized}, \" + f\"center={self.center}, mode={self.mode}, \" + f\"kernel_size={self.num_bins}x{self.K.shape[2]}\")", "imag] Tensor frame_hop: frame hop size in number samples onesided:", "2 window = tf.pad(window, (lpad, B - frame_len - lpad))", "x 1 x W I = th.eye(window.shape[0], device=win.device)[:, None] #", "from typing import Optional, Union, Tuple def init_window(wnd: str, frame_len:", "round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False, mode=mode) return _forward_stft(wav, K.to(wav.device), output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis,", "bool = False) -> th.Tensor: \"\"\" Return mel filter coefficients", "kernels, from init_kernel(...) 
output (str), output format: polar: return (magnitude,", "choose round(#power_of_two) as the FFT size pre_emphasis: factor of preemphasis", "str, frame_len: int) -> th.Tensor: \"\"\" Return window coefficient Args:", "1) ctx.append(th.index_select(feats, -2, idx)) if op == \"cat\": # N", "= s[..., pad:-pad] norm = norm[..., pad:-pad] s = s", "False, onesided: bool = True, center: bool = False, mode:", "2B x 1 x W K = th.reshape(K, (B *", "else: reshape NC x 1 x S N, S =", "-> th.Tensor: \"\"\" Return speed perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args:", "\"\"\" if isinstance(transform, th.Tensor): device = transform.device else: device =", "W, NC x W x T, NC x 2B x", "+ imag**2 + EPSILON)**0.5 pha = th.atan2(imag, real) return (mag,", "4 + 1 real = real[..., :num_bins, :] imag =", "th.Tensor): device = transform.device else: device = transform[0].device K, w", "self).__init__(*args, inverse=False, **kwargs) def forward( self, wav: th.Tensor, output: str", "or kaldi) \"\"\" if mode not in [\"librosa\", \"kaldi\"]: raise", "self.center = center self.mode = mode self.num_bins = self.K.shape[0] //", "not in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output format: {input}\")", "if th.sum(wav_len <= self.frame_len): raise RuntimeError( f\"Audio samples less than", "dst_sr: {src_sr}/{dst_sr}\") gcd = math.gcd(src_sr, dst_sr) src_sr = src_sr //", "T if imag_dim == 2: real = th.unsqueeze(real, 0) imag", "1 self.expr = ( f\"window={window}, stride={frame_hop}, onesided={onesided}, \" + f\"pre_emphasis={self.pre_emphasis},", "transform # (N) x F x T imag_dim = imag.dim()", "1] elif input == \"polar\": real = transform[0] * th.cos(transform[1])", "onesided=onesided, center=center) class STFTBase(nn.Module): \"\"\" Base layer for (i)STFT Args:", "or [Tensor, Tensor]), STFT output Return s (Tensor), N x", "round(#power_of_two) as the FFT size num_bins: number of the frequency", "None] / 
float(dst_sr) - np.arange(src_sr)[None, :, None] / float(src_sr) -", "= th.eye(window.shape[0], device=win.device)[:, None] # 1 x 1 x T", "T, device=feats.device, dtype=th.int64) idx = th.clamp(idx, min=0, max=T - 1)", "function \"\"\" if isinstance(transform, th.Tensor): device = transform.device else: device", "-1) else: # N x ... x T x F", "https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1 x W x T win = th.repeat_interleave(window[None,", "2D/3D tensor, but got {imag_dim}D\") # if F x T,", "th.Tensor: \"\"\" Return speed perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr:", "Tuple[th.Tensor, th.Tensor]], kernel: th.Tensor, window: th.Tensor, input: str = \"polar\",", "packed.shape[-1], dim=-1) # W x 1 x W I =", "frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop, padding=0) frames[:, 1:]", "def forward(self, transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], input: str = \"polar\")", "wav_dim == 3: packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1]) #", "splice (Tensor): feature with context padded \"\"\" if lctx +", "W x T win = th.repeat_interleave(window[None, ..., None], packed.shape[-1], dim=-1)", "to make K^H * K = I K = K", "of the frame frame_hop: hop size between frames window: window", "// 2 + 1) return th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr: int,", "in [\"bartlett\", \"hann\", \"hamm\", \"blackman\", \"rect\", \"sqrthann\"]: raise RuntimeError(f\"Unknown window", "init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=inverse, mode=mode) self.K =", "N x (C) x B/2+1 x T if onesided: num_bins", "STFT if pre_emphasis > 0: # NC x W x", "mode=\"librosa\") -> None: super(STFTBase, self).__init__() K, w = init_kernel(frame_len, frame_hop,", "onesided: bool = True, center: bool = False, mode: 
str", "w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=True, mode=mode)", "* (0.5 + 0.5 * np.cos(times / padding * math.pi))", "+ 1) return th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr: int, dst_sr: int,", "W x T, NC x 2B x T packed =", "(similar with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two)", "16000, num_mels: int = 80, fmin: float = 0.0, fmax:", "output (str), output format: polar: return (magnitude, phase) pair complex:", "mode: str = \"librosa\") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT", "real[:, reverse]], 1) imag = th.cat([imag, -imag[:, reverse]], 1) #", "the frame frame_hop: hop size between frames window: window name", "S, reshape N x 1 x S # else: reshape", "bool = True, normalized: bool = False, onesided: bool =", "src_sr: sample rate of the source signal dst_sr: sample rate", "gcd = math.gcd(src_sr, dst_sr) src_sr = src_sr // gcd dst_sr", "\"rect\": # match with librosa c = wnd_tpl[wnd](frame_len, periodic=True) else:", "(Tensor or [Tensor, Tensor]), STFT output Return s (Tensor), N", "slight difference on applying window function \"\"\" if isinstance(transform, th.Tensor):", "forward(self, transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], input: str = \"polar\") ->", "ctx = [] T = feats.shape[-2] T = T -", "if onesided: # [self.num_bins - 2, ..., 1] reverse =", "subsampling_factor: subsampling factor op: operator on feature context Return: splice", "= (real**2 + imag**2 + EPSILON)**0.5 pha = th.atan2(imag, real)", "= wnd_tpl[wnd](frame_len, periodic=True) else: c = wnd_tpl[wnd](frame_len) return c def", "used in _forward_stft Return: wav (Tensor), N x S \"\"\"", "input == \"real\": real, imag = transform[..., 0], transform[..., 1]", "T x F x D splice = th.stack(ctx, -1) return", "tf.unfold(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop, padding=0) frames[:, 1:] = frames[:,", 
"coefficients of the filter \"\"\" if src_sr == dst_sr: raise", "kernel (for iSTFT) \"\"\" def __init__(self, frame_len: int, frame_hop: int,", "1 + int(num_zeros / zeros_per_block) # dst_sr x src_sr x", "(num_bins - 1) * 2 # fmin & fmax freq_upper", "N = (num_bins - 1) * 2 # fmin &", "def _forward_stft( wav: th.Tensor, kernel: th.Tensor, output: str = \"polar\",", "stride={frame_hop}, onesided={onesided}, \" + f\"pre_emphasis={self.pre_emphasis}, normalized={normalized}, \" + f\"center={self.center}, mode={self.mode},", "# [self.num_bins - 2, ..., 1] reverse = range(kernel.shape[0] //", "the target signal Return: weight (Tensor): coefficients of the filter", "type: {wnd}\") wnd_tpl = { \"sqrthann\": sqrthann, \"hann\": th.hann_window, \"hamm\":", "polar: return (magnitude, phase) pair complex: return (real, imag) pair", "* th.cos(transform[1]) imag = transform[0] * th.sin(transform[1]) else: real, imag", "use normalized DFT kernel pre_emphasis: factor of preemphasis mode: \"kaldi\"|\"librosa\",", "EPSILON from typing import Optional, Union, Tuple def init_window(wnd: str,", "STFT inner function Args: wav (Tensor), N x (C) x", "= \"polar\" ) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" Accept (single", "(magnitude, phase) pair complex: return (real, imag) pair real: return", "\"\"\" if input not in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown", "factor of preemphasis onesided: return half FFT bins center: if", "mode={self.mode}, \" + f\"kernel_size={self.num_bins}x{self.K.shape[2]}\") def num_frames(self, wav_len: th.Tensor) -> th.Tensor:", "output: str = \"polar\", pre_emphasis: float = 0, frame_hop: int", "\"polar\") -> th.Tensor: \"\"\" Accept phase & magnitude and output", "x B x T real, imag = th.chunk(packed, 2, dim=-2)", "as np import torch as th import torch.nn as nn", "self).__init__(*args, inverse=True, **kwargs) def forward(self, transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], input:", "or [Tensor, Tensor]), N x 
(C) x F x T", "raise ValueError(f\"Unknown op for feature splicing: {op}\") # [N x", "np import torch as th import torch.nn as nn import", "(in Hz) norm: normalize the mel filter coefficients \"\"\" #", "size normalized: return normalized DFT matrix inverse: return iDFT matrix", "!= \"rect\": # match with librosa c = wnd_tpl[wnd](frame_len, periodic=True)", "imag.dim() if imag_dim not in [2, 3]: raise RuntimeError(f\"Expect 2D/3D", "= self.K.shape[-1] if self.center: wav_len += kernel_size return (wav_len -", "real, imag = transform[..., 0], transform[..., 1] elif input ==", "[\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output format: {output}\") if wav_dim", "tf.pad(window, (lpad, B - frame_len - lpad)) if normalized: #", "True, normalized: bool = False, pre_emphasis: float = 0, onesided:", "context subsampling_factor: subsampling factor op: operator on feature context Return:", "class iSTFT(STFTBase): \"\"\" Inverse Short-time Fourier Transform as a Layer", "int, frame_hop: int, window: str = \"sqrthann\", round_pow_of_two: bool =", "splice = th.cat(ctx, -1) else: # N x ... x", "numpy as np import torch as th import torch.nn as", "(Tensor), N x S \"\"\" return _inverse_stft(transform, self.K, self.w, input=input,", "N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm=\"slaney\" if norm else None)", "* K = I S = B**0.5 else: S =", "the frame frame_hop: hop size between frames output: output type", "= th.cat(ctx, -1) else: # N x ... 
x T", "wav_len += kernel_size return (wav_len - kernel_size) // self.frame_hop +", "I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1) # W x B", "window if needed if mode == \"librosa\" and B !=", "\" + f\"pre_emphasis={self.pre_emphasis}, normalized={normalized}, \" + f\"center={self.center}, mode={self.mode}, \" +", "= 80, fmin: float = 0.0, fmax: Optional[float] = None,", "import torch.nn as nn import torch.nn.functional as tf import librosa.filters", "x 1 x W K = th.reshape(K, (B * 2,", "src_sr == dst_sr: raise ValueError( f\"src_sr should not be equal", "return normalized DFT matrix inverse: return iDFT matrix mode: framing", "wav_len: th.Tensor) -> th.Tensor: \"\"\" Compute number of the frames", "round_pow_of_two: bool = True, normalized: bool = False, inverse: bool", "\"\"\" if src_sr == dst_sr: raise ValueError( f\"src_sr should not", "samples less than frame_len ({self.frame_len})\") kernel_size = self.K.shape[-1] if self.center:", "weight = np.sinc( times * zeros_per_block) * window * zeros_per_block", "frames input: input format (complex, real, polar) window: window name", "kaldi) \"\"\" if mode not in [\"librosa\", \"kaldi\"]: raise ValueError(f\"Unsupported", "\"\"\" if mode not in [\"librosa\", \"kaldi\"]: raise ValueError(f\"Unsupported mode:", "* 2 # fmin & fmax freq_upper = sr //", "if true, choose round(#power_of_two) as the FFT size pre_emphasis: factor", "bool = True, inverse: bool = False, center: bool =", "Optional[int] = None, sr: int = 16000, num_mels: int =", "as tf import librosa.filters as filters from aps.const import EPSILON", "not in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output format: {output}\")", "int = 64) -> th.Tensor: \"\"\" Return speed perturb filters,", "src_sr == 1 or dst_sr == 1: raise ValueError(\"do not", "- frame_len - lpad)) if normalized: # make K^H *", "lctx + rctx == 0: return feats if op not", "got {wav_dim:d}D\") # if N x S, reshape N x", "th.stack([real, imag], dim=-1) else: mag = (real**2 
+ imag**2 +", "frame_len: length of the frame frame_hop: hop size between frames", "== \"real\": real, imag = transform[..., 0], transform[..., 1] elif", "iSTFT function implementation, equals to iSTFT layer Args: transform: results", "if imag_dim == 2: real = th.unsqueeze(real, 0) imag =", "window: str, round_pow_of_two: bool = True, normalized: bool = False,", "DFT kernel onesided: output onesided STFT inverse: using iDFT kernel", "**kwargs) def forward(self, transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], input: str =", "pha) def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], kernel: th.Tensor, window: th.Tensor,", "center: bool = False, mode: str = \"librosa\") -> th.Tensor:", "x B/2+1 x T if onesided: num_bins = kernel.shape[0] //", "kernel onesided: output onesided STFT mode: \"kaldi\"|\"librosa\", slight difference on", "waveform and output magnitude and phase Args wav (Tensor) input", "x T win = th.repeat_interleave(window[None, ..., None], packed.shape[-1], dim=-1) #", "the FFT size pre_emphasis: factor of preemphasis normalized: use normalized", "= K / B # 2 x B x W", "frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided, center=center) def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], frame_len:", "fmin = max(0, fmin) # mel filter coefficients mel =", "tf.pad(wav, (pad, pad), mode=\"reflect\") # STFT if pre_emphasis > 0:", "\"\"\" Return STFT kernels Args: frame_len: length of the frame", "x T, reshape 1 x F x T if imag_dim", "// self.frame_hop + 1 def extra_repr(self) -> str: return self.expr", "factor of preemphasis normalized: use normalized DFT kernel onesided: output", "reshape N x 1 x S # else: reshape NC", "2 + 1) return th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr: int, dst_sr:", "0, normalized: bool = False, onesided: bool = True, center:", "\"sqrthann\": sqrthann, \"hann\": th.hann_window, \"hamm\": th.hamming_window, \"blackman\": th.blackman_window, 
\"bartlett\": th.bartlett_window,", "matrix inverse: return iDFT matrix mode: framing mode (librosa or", "{input}\") if input == \"real\": real, imag = transform[..., 0],", "Optional, Union, Tuple def init_window(wnd: str, frame_len: int) -> th.Tensor:", "* cutoff_ratio padding = 1 + int(num_zeros / zeros_per_block) #", "and B != frame_len: lpad = (B - frame_len) //", "bool = False, onesided: bool = True, center: bool =", "mode self.num_bins = self.K.shape[0] // 4 + 1 self.expr =", "...], frames) else: packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0) #", "K^H * K = I S = B**0.5 else: S", "subsampling factor op: operator on feature context Return: splice (Tensor):", "phase) pair complex: return (real, imag) pair real: return [real;", "mag = (real**2 + imag**2 + EPSILON)**0.5 pha = th.atan2(imag,", "pha = th.atan2(imag, real) return (mag, pha) def _inverse_stft(transform: Union[th.Tensor,", "a Layer \"\"\" def __init__(self, *args, **kwargs): super(iSTFT, self).__init__(*args, inverse=True,", "else: packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0) # NC x", "N x (C) x B x T real, imag =", "1 x 1 x T norm = tf.conv_transpose1d(win**2, I, stride=frame_hop,", "= th.unsqueeze(imag, 0) if onesided: # [self.num_bins - 2, ...,", "T norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0) if center: pad", "Return transform (Tensor or [Tensor, Tensor]), N x (C) x", "\"bartlett\": th.bartlett_window, \"rect\": th.ones } if wnd != \"rect\": #", "- pre_emphasis * frames[:, :-1] # 1 x 2B x", "th.chunk(packed, 2, dim=-2) # N x (C) x B/2+1 x", "normalized=normalized, inverse=inverse, mode=mode) self.K = nn.Parameter(K, requires_grad=False) self.w = nn.Parameter(w,", "= max(0, fmin) # mel filter coefficients mel = filters.mel(sr,", "False, inverse: bool = False, mode: str = \"librosa\") ->", "imag = transform # (N) x F x T imag_dim", "None, :] + padding) window = np.heaviside(1 - np.abs(times /", "mode not in [\"librosa\", \"kaldi\"]: raise 
ValueError(f\"Unsupported mode: {mode}\") #", "imag[..., :num_bins, :] if output == \"complex\": return (real, imag)", "x B x W K = th.transpose(K, 0, 2) *", "filters.mel(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm=\"slaney\" if norm else", "normalized: bool = False, onesided: bool = True, center: bool", "expect 2D/3D tensor, but got {wav_dim:d}D\") # if N x", "x T s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0) # normalized", "K = I S = B**0.5 else: S = 1", "mode=\"reflect\") # STFT if pre_emphasis > 0: # NC x", "dst_sr: int, cutoff_ratio: float = 0.95, num_zeros: int = 64)", "number samples onesided: return half FFT bins center: used in", "bool = True, normalized: bool = False, inverse: bool =", "def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], kernel: th.Tensor, window: th.Tensor, input:", "freq_upper else: fmax = min(fmax + freq_upper if fmax <", "/ float(dst_sr) - np.arange(src_sr)[None, :, None] / float(src_sr) - np.arange(2", "(N) x F x T imag_dim = imag.dim() if imag_dim", "padded \"\"\" if lctx + rctx == 0: return feats", "Return mel filter coefficients Args: frame_len: length of the frame", "in [2, 3]: raise RuntimeError(f\"Expect 2D/3D tensor, but got {imag_dim}D\")", "for feature splicing: {op}\") # [N x ... 
x T", "# NC x W x T frames = tf.unfold(wav[:, None],", "filter coefficients Args: frame_len: length of the frame round_pow_of_two: if", "of preemphasis normalized: use normalized DFT kernel onesided: output onesided", "+ f\"center={self.center}, mode={self.mode}, \" + f\"kernel_size={self.num_bins}x{self.K.shape[2]}\") def num_frames(self, wav_len: th.Tensor)", "in [\"cat\", \"stack\"]: raise ValueError(f\"Unknown op for feature splicing: {op}\")", "frames window: window name round_pow_of_two: if true, choose round(#power_of_two) as", "inverse: using iDFT kernel (for iSTFT) mode: \"kaldi\"|\"librosa\", slight difference", "imag_dim = imag.dim() if imag_dim not in [2, 3]: raise", "(np.arange(dst_sr)[:, None, None] / float(dst_sr) - np.arange(src_sr)[None, :, None] /", "rctx == 0: return feats if op not in [\"cat\",", "= False, mode=\"librosa\") -> None: super(STFTBase, self).__init__() K, w =", "80, fmin: float = 0.0, fmax: Optional[float] = None, norm:", "results \"\"\" wav_dim = wav.dim() if output not in [\"polar\",", "function Args: transform (Tensor or [Tensor, Tensor]), STFT transform results", "window: str = \"sqrthann\", round_pow_of_two: bool = True, pre_emphasis: float", "frame_len: length of the frame round_pow_of_two: if true, choose round(#power_of_two)", "= \"polar\") -> th.Tensor: \"\"\" Accept phase & magnitude and", "else frame_len # center padding window if needed if mode", "highest frequency (in Hz) norm: normalize the mel filter coefficients", "with context padded \"\"\" if lctx + rctx == 0:", "equals to STFT layer Args: wav: source audio signal frame_len:", "rate of the target signal Return: weight (Tensor): coefficients of", "0, onesided: bool = True, inverse: bool = False, center:", "__init__(self, *args, **kwargs): super(iSTFT, self).__init__(*args, inverse=True, **kwargs) def forward(self, transform:", "frame_len - lpad)) if normalized: # make K^H * K", "ValueError(f\"Unsupported mode: {mode}\") # FFT points B = 
2**math.ceil(math.log2(frame_len)) if", "1 or dst_sr == 1: raise ValueError(\"do not support integer", "\"complex\", window: str = \"sqrthann\", round_pow_of_two: bool = True, normalized:", "2 # fmin & fmax freq_upper = sr // 2", "= T - T % subsampling_factor for c in range(-lctx,", "speed perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample rate of", "# FFT points B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len", "bool = True, num_bins: Optional[int] = None, sr: int =", "str = \"polar\") -> th.Tensor: \"\"\" Accept phase & magnitude", "preemphasis onesided: return half FFT bins center: if true, we", "raise RuntimeError(f\"Expect 2D/3D tensor, but got {imag_dim}D\") # if F", "th.Tensor: \"\"\" Return STFT kernels Args: frame_len: length of the", "if output not in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output", "2, dim=-2) # N x (C) x B/2+1 x T", "win = th.repeat_interleave(window[None, ..., None], packed.shape[-1], dim=-1) # W x", "and not normalized: # to make K^H * K =", "but got {wav_dim:d}D\") # if N x S, reshape N", "kernel, stride=frame_hop, padding=0) # normalized audio samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171", "x F, original feature lctx: left context rctx: right context", "with librosa c = wnd_tpl[wnd](frame_len, periodic=True) else: c = wnd_tpl[wnd](frame_len)", "if fmax < 0 else fmax, freq_upper) fmin = max(0,", "th.cat([real, real[:, reverse]], 1) imag = th.cat([imag, -imag[:, reverse]], 1)", "librosa.filters as filters from aps.const import EPSILON from typing import", "input: str = \"complex\", window: str = \"sqrthann\", round_pow_of_two: bool", "\"\"\" # FFT points if num_bins is None: N =", "splice_feature(feats: th.Tensor, lctx: int = 1, rctx: int = 1,", "4 + 1 self.expr = ( f\"window={window}, stride={frame_hop}, onesided={onesided}, \"", "STFT 
inverse: using iDFT kernel (for iSTFT) \"\"\" def __init__(self,", "window function \"\"\" if isinstance(transform, th.Tensor): device = transform.device else:", "the source signal dst_sr: sample rate of the target signal", "[\"bartlett\", \"hann\", \"hamm\", \"blackman\", \"rect\", \"sqrthann\"]: raise RuntimeError(f\"Unknown window type:", "\"polar\", pre_emphasis: float = 0, frame_hop: int = 256, onesided:", "factor of preemphasis mode: \"kaldi\"|\"librosa\", slight difference on applying window", "length of the frame \"\"\" def sqrthann(frame_len, periodic=True): return th.hann_window(frame_len,", "\"real\"]: raise ValueError(f\"Unknown output format: {input}\") if input == \"real\":", "= real[..., :num_bins, :] imag = imag[..., :num_bins, :] if", "th.matmul(kernel[:, 0][None, ...], frames) else: packed = tf.conv1d(wav, kernel, stride=frame_hop,", "channel) raw waveform and output magnitude and phase Args wav", "else frame_len else: N = (num_bins - 1) * 2", "= False, onesided: bool = True, center: bool = False,", "as the FFT size normalized: return normalized DFT matrix inverse:", "librosa c = wnd_tpl[wnd](frame_len, periodic=True) else: c = wnd_tpl[wnd](frame_len) return", "padding window if needed if mode == \"librosa\" and B", "(str), input format: polar: return (magnitude, phase) pair complex: return", "N x C x 2B x T if wav_dim ==", "not in [2, 3]: raise RuntimeError(f\"Expect 2D/3D tensor, but got", "reverse]], 1) imag = th.cat([imag, -imag[:, reverse]], 1) # pack:", "\"\"\" Compute number of the frames \"\"\" if th.sum(wav_len <=", "type (complex, real, polar) window: window name center: center flag", "1 x W x T win = th.repeat_interleave(window[None, ..., None],", "th.zeros(B, B)], dim=-1) # W x B x 2 K", "(for iSTFT) \"\"\" def __init__(self, frame_len: int, frame_hop: int, window:", "if N x S, reshape N x 1 x S", "int, output: str = \"complex\", window: str = \"sqrthann\", round_pow_of_two:", "# num_mels x (N // 2 + 1) return th.tensor(mel,", "0) imag 
= th.unsqueeze(imag, 0) if onesided: # [self.num_bins -", "subsampling_factor for c in range(-lctx, rctx + 1): idx =", "frame_len) // 2 window = tf.pad(window, (lpad, B - frame_len", "+ EPSILON)**0.5 pha = th.atan2(imag, real) return (mag, pha) def", "T real, imag = th.chunk(packed, 2, dim=-2) # N x", "feature lctx: left context rctx: right context subsampling_factor: subsampling factor", "Args: frame_len: length of the frame frame_hop: hop size between", "1, K.shape[-1])) return K, window def mel_filter(frame_len: int, round_pow_of_two: bool", "None) # num_mels x (N // 2 + 1) return", "center: bool = False) -> th.Tensor: \"\"\" iSTFT inner function", "T x F, ...] ctx = [] T = feats.shape[-2]", "or [Tensor, Tensor]), STFT transform results kernel (Tensor), STFT transform", "W K = th.reshape(K, (B * 2, 1, K.shape[-1])) return", "None, sr: int = 16000, num_mels: int = 80, fmin:", "\"kaldi\"|\"librosa\", slight difference on applying window function onesided: output onesided", "round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=True, mode=mode) return _inverse_stft(transform, K.to(device), w.to(device), input=input, frame_hop=frame_hop,", "kernels, from init_kernel(...) 
input (str), input format: polar: return (magnitude,", "th.tensor(weight, dtype=th.float32) def splice_feature(feats: th.Tensor, lctx: int = 1, rctx:", "mel filter coefficients \"\"\" # FFT points if num_bins is", "= center self.mode = mode self.num_bins = self.K.shape[0] // 4", "\" + f\"kernel_size={self.num_bins}x{self.K.shape[2]}\") def num_frames(self, wav_len: th.Tensor) -> th.Tensor: \"\"\"", "\"polar\": real = transform[0] * th.cos(transform[1]) imag = transform[0] *", "freq_upper = sr // 2 if fmax is None: fmax", "\"real\": return th.stack([real, imag], dim=-1) else: mag = (real**2 +", "needed if mode == \"librosa\" and B != frame_len: lpad", "2 K = th.fft(I / S, 1) if mode ==", "output: str = \"complex\", window: str = \"sqrthann\", round_pow_of_two: bool", "(str), output format: polar: return (magnitude, phase) pair complex: return", "1] reverse = range(kernel.shape[0] // 4 - 1, 0, -1)", "function Args: wav (Tensor), N x (C) x S kernel", "round(#power_of_two) as the FFT size normalized: use normalized DFT kernel", "if num_bins is None: N = 2**math.ceil( math.log2(frame_len)) if round_pow_of_two", "- lpad)) if normalized: # make K^H * K =", "= wav.view(-1, 1, S) # NC x 1 x S+2P", "input format: polar: return (magnitude, phase) pair complex: return (real,", "# W x 1 x W I = th.eye(window.shape[0], device=win.device)[:,", "Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], input: str = \"polar\") -> th.Tensor: \"\"\"", "2B x T => N x C x 2B x", "EPSILON)**0.5 pha = th.atan2(imag, real) return (mag, pha) def _inverse_stft(transform:", "= th.cat([imag, -imag[:, reverse]], 1) # pack: N x 2B", "c = wnd_tpl[wnd](frame_len) return c def init_kernel(frame_len: int, frame_hop: int,", "op for feature splicing: {op}\") # [N x ... 
x", "make K^H * K = I K = K /", "by STFT num_mels: number of the mel bands fmin: lowest", "Return window coefficient Args: wnd: window name frame_len: length of", "(C) x B/2+1 x T if onesided: num_bins = kernel.shape[0]", "times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) - np.arange(src_sr)[None, :,", "output onesided STFT mode: \"kaldi\"|\"librosa\", slight difference on applying window", "== \"real\": return th.stack([real, imag], dim=-1) else: mag = (real**2", "= kernel.shape[0] // 4 + 1 real = real[..., :num_bins,", "= \"cat\") -> th.Tensor: \"\"\" Splice feature Args: feats (Tensor):", "freq_upper) fmin = max(0, fmin) # mel filter coefficients mel", "- np.arange(2 * padding + 1)[None, None, :] + padding)", "if wnd not in [\"bartlett\", \"hann\", \"hamm\", \"blackman\", \"rect\", \"sqrthann\"]:", "\"hann\": th.hann_window, \"hamm\": th.hamming_window, \"blackman\": th.blackman_window, \"bartlett\": th.bartlett_window, \"rect\": th.ones", "init_window(wnd: str, frame_len: int) -> th.Tensor: \"\"\" Return window coefficient", "wav_dim not in [2, 3]: raise RuntimeError(f\"STFT expect 2D/3D tensor,", "Return: wav (Tensor), N x S \"\"\" if input not", "inverse=True, **kwargs) def forward(self, transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], input: str", "[self.num_bins - 2, ..., 1] reverse = range(kernel.shape[0] // 4", "== 3: packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1]) # N", "fmin: lowest frequency (in Hz) fmax: highest frequency (in Hz)", "if mode == \"kaldi\": K = K[:frame_len] if inverse and", "FFT points if num_bins is None: N = 2**math.ceil( math.log2(frame_len))", "1): idx = th.arange(c, c + T, device=feats.device, dtype=th.int64) idx", "False, mode: str = \"librosa\") -> th.Tensor: \"\"\" Return STFT", "transform[0] * th.sin(transform[1]) else: real, imag = transform # (N)", "iSTFT) \"\"\" def __init__(self, frame_len: int, frame_hop: int, window: str", "the FFT size normalized: use normalized DFT kernel pre_emphasis: 
factor", "s def forward_stft( wav: th.Tensor, frame_len: int, frame_hop: int, output:", "size between frames window: window name round_pow_of_two: if true, choose", "FFT size normalized: use normalized DFT kernel onesided: output onesided", "... x T x F, original feature lctx: left context", "window name round_pow_of_two: if true, choose round(#power_of_two) as the FFT", "x S # else: reshape NC x 1 x S", "= \"librosa\") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT function implementation,", "float(src_sr) return th.tensor(weight, dtype=th.float32) def splice_feature(feats: th.Tensor, lctx: int =", "w.to(device), input=input, frame_hop=frame_hop, onesided=onesided, center=center) class STFTBase(nn.Module): \"\"\" Base layer", "on applying window function \"\"\" K, _ = init_kernel(frame_len, frame_hop,", "init_kernel(frame_len: int, frame_hop: int, window: str, round_pow_of_two: bool = True,", "output magnitude and phase Args wav (Tensor) input signal, N", "self, wav: th.Tensor, output: str = \"polar\" ) -> Union[th.Tensor,", "else: c = wnd_tpl[wnd](frame_len) return c def init_kernel(frame_len: int, frame_hop:", "= False) -> th.Tensor: \"\"\" iSTFT inner function Args: transform", "else: N = (num_bins - 1) * 2 # fmin", "np.arange(src_sr)[None, :, None] / float(src_sr) - np.arange(2 * padding +", "of the frame round_pow_of_two: if true, choose round(#power_of_two) as the", "frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=inverse, mode=mode) self.K = nn.Parameter(K,", "round_pow_of_two: bool = True, pre_emphasis: float = 0, normalized: bool", "mel = filters.mel(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm=\"slaney\" if", "..., None], packed.shape[-1], dim=-1) # W x 1 x W", "onesided: bool = False, center: bool = False) -> th.Tensor:", "_forward_stft Return: wav (Tensor), N x S \"\"\" if input", "hop size in number samples pre_emphasis: factor of preemphasis onesided:", "elif 
input == \"polar\": real = transform[0] * th.cos(transform[1]) imag", "ValueError(f\"Unknown output format: {input}\") if input == \"real\": real, imag", "Tuple[th.Tensor, th.Tensor]], frame_len: int, frame_hop: int, input: str = \"complex\",", "x T if wav_dim == 3: packed = packed.view(N, -1,", "== 1 or dst_sr == 1: raise ValueError(\"do not support", "pre_emphasis=self.pre_emphasis, onesided=self.onesided, center=self.center) class iSTFT(STFTBase): \"\"\" Inverse Short-time Fourier Transform", "[Tensor, Tensor]), STFT transform results \"\"\" wav_dim = wav.dim() if", "mode (librosa or kaldi) \"\"\" if mode not in [\"librosa\",", "x 1 x S+2P if center: pad = kernel.shape[-1] //", "output == \"complex\": return (real, imag) elif output == \"real\":", "\"real\"]: raise ValueError(f\"Unknown output format: {output}\") if wav_dim not in", "init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False, mode=mode) return _forward_stft(wav,", "th.hamming_window, \"blackman\": th.blackman_window, \"bartlett\": th.bartlett_window, \"rect\": th.ones } if wnd", "\"kaldi\"]: raise ValueError(f\"Unsupported mode: {mode}\") # FFT points B =", "transform[..., 0], transform[..., 1] elif input == \"polar\": real =", "size in number samples pre_emphasis: factor of preemphasis onesided: return", "math.log2(frame_len)) if round_pow_of_two else frame_len else: N = (num_bins -", "self.K.shape[0] // 4 + 1 self.expr = ( f\"window={window}, stride={frame_hop},", "1: raise ValueError(\"do not support integer downsample/upsample\") zeros_per_block = min(src_sr,", "S = B**0.5 else: S = 1 I = th.stack([th.eye(B),", "dst_sr x src_sr x K times = (np.arange(dst_sr)[:, None, None]", "bins center: if true, we assumed to have centered frames", "self.K = nn.Parameter(K, requires_grad=False) self.w = nn.Parameter(w, requires_grad=False) self.frame_len =", "num_bins: number of the frequency bins produced by STFT num_mels:", 
"return (magnitude, phase) pair complex: return (real, imag) pair real:", "true, we assumed to have centered frames Return: transform (Tensor", "= \"librosa\") -> th.Tensor: \"\"\" Return STFT kernels Args: frame_len:", "fmax, freq_upper) fmin = max(0, fmin) # mel filter coefficients", "FFT size normalized: return normalized DFT matrix inverse: return iDFT", "(C) x B x T real, imag = th.chunk(packed, 2,", "idx = th.arange(c, c + T, device=feats.device, dtype=th.int64) idx =", "splice = th.stack(ctx, -1) return splice def _forward_stft( wav: th.Tensor,", "pad), mode=\"reflect\") # STFT if pre_emphasis > 0: # NC", "\"librosa\") -> th.Tensor: \"\"\" iSTFT function implementation, equals to iSTFT", "B x T real = th.cat([real, real[:, reverse]], 1) imag", "input: str = \"polar\", frame_hop: int = 256, onesided: bool", "operator on feature context Return: splice (Tensor): feature with context", "# N x ... x T x FD splice =", "K^H * K = I K = K / B", "dst_sr) src_sr = src_sr // gcd dst_sr = dst_sr //", "# NOTE: match with librosa wav = tf.pad(wav, (pad, pad),", "int, window: str = \"sqrthann\", round_pow_of_two: bool = True, normalized:", "of the source signal dst_sr: sample rate of the target", "and output magnitude and phase Args wav (Tensor) input signal,", "0) if onesided: # [self.num_bins - 2, ..., 1] reverse", "= True, num_bins: Optional[int] = None, sr: int = 16000,", "x (N // 2 + 1) return th.tensor(mel, dtype=th.float32) def", "window def mel_filter(frame_len: int, round_pow_of_two: bool = True, num_bins: Optional[int]", "for (i)STFT Args: frame_len: length of the frame frame_hop: hop", "(for iSTFT) mode: \"kaldi\"|\"librosa\", slight difference on applying window function", "filters from aps.const import EPSILON from typing import Optional, Union,", "padding=0) if center: pad = kernel.shape[-1] // 2 s =", "_inverse_stft(transform, K.to(device), w.to(device), input=input, frame_hop=frame_hop, onesided=onesided, center=center) class STFTBase(nn.Module): \"\"\"", 
"F, ...] ctx = [] T = feats.shape[-2] T =", "STFT frame_len: length of the frame frame_hop: hop size between", "th.fft(I / S, 1) if mode == \"kaldi\": K =", "th.Tensor: \"\"\" Return window coefficient Args: wnd: window name frame_len:", "Args wav (Tensor) input signal, N x (C) x S", "frame_hop: hop size between frames window: window name center: center", "# 2 x B x W K = th.transpose(K, 0,", "sample rate of the source signal dst_sr: sample rate of", "range(-lctx, rctx + 1): idx = th.arange(c, c + T,", "def extra_repr(self) -> str: return self.expr class STFT(STFTBase): \"\"\" Short-time", "= 1, op: str = \"cat\") -> th.Tensor: \"\"\" Splice", "class STFT(STFTBase): \"\"\" Short-time Fourier Transform as a Layer \"\"\"", "raise ValueError(f\"Unknown output format: {input}\") if input == \"real\": real,", "center: used in _forward_stft Return: wav (Tensor), N x S", "\"\"\" STFT function implementation, equals to STFT layer Args: wav:", "self.expr class STFT(STFTBase): \"\"\" Short-time Fourier Transform as a Layer", "1) # pack: N x 2B x T packed =", "as the FFT size pre_emphasis: factor of preemphasis normalized: use", "# if F x T, reshape 1 x F x", "th.eye(window.shape[0], device=win.device)[:, None] # 1 x 1 x T norm", "fmin & fmax freq_upper = sr // 2 if fmax", "STFT(STFTBase): \"\"\" Short-time Fourier Transform as a Layer \"\"\" def", "0, 2) * window # 2B x 1 x W", "pre_emphasis: factor of preemphasis normalized: use normalized DFT kernel onesided:", "\"blackman\": th.blackman_window, \"bartlett\": th.bartlett_window, \"rect\": th.ones } if wnd !=", "dst_sr) * cutoff_ratio padding = 1 + int(num_zeros / zeros_per_block)", "window function onesided: output onesided STFT inverse: using iDFT kernel", "dtype=th.float32) def splice_feature(feats: th.Tensor, lctx: int = 1, rctx: int", "Return STFT kernels Args: frame_len: length of the frame frame_hop:", "reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample rate of 
the source signal", "- np.arange(src_sr)[None, :, None] / float(src_sr) - np.arange(2 * padding", "sr // 2 if fmax is None: fmax = freq_upper", "onesided STFT inverse: using iDFT kernel (for iSTFT) mode: \"kaldi\"|\"librosa\",", "# Copyright 2019 <NAME> # License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import", "window: str = \"sqrthann\", round_pow_of_two: bool = True, normalized: bool", "= tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0) # normalized audio samples #", "imag_dim not in [2, 3]: raise RuntimeError(f\"Expect 2D/3D tensor, but", "on applying window function \"\"\" if isinstance(transform, th.Tensor): device =", "T real = th.cat([real, real[:, reverse]], 1) imag = th.cat([imag,", "// gcd if src_sr == 1 or dst_sr == 1:", "wav: th.Tensor, output: str = \"polar\" ) -> Union[th.Tensor, Tuple[th.Tensor,", "# [N x ... x T x F, ...] ctx", "c = wnd_tpl[wnd](frame_len, periodic=True) else: c = wnd_tpl[wnd](frame_len) return c", "or dst_sr == 1: raise ValueError(\"do not support integer downsample/upsample\")", "STFT function implementation, equals to STFT layer Args: wav: source", "th.blackman_window, \"bartlett\": th.bartlett_window, \"rect\": th.ones } if wnd != \"rect\":", "dim=1) # N x 1 x T s = tf.conv_transpose1d(packed,", "= { \"sqrthann\": sqrthann, \"hann\": th.hann_window, \"hamm\": th.hamming_window, \"blackman\": th.blackman_window,", "-> th.Tensor: \"\"\" Return window coefficient Args: wnd: window name", "# else: reshape NC x 1 x S N, S", "th.Tensor: \"\"\" iSTFT inner function Args: transform (Tensor or [Tensor,", "min=0, max=T - 1) ctx.append(th.index_select(feats, -2, idx)) if op ==", "window function \"\"\" K, _ = init_kernel(frame_len, frame_hop, init_window(window, frame_len),", "inverse and not normalized: # to make K^H * K", "_forward_stft( wav: th.Tensor, kernel: th.Tensor, output: str = \"polar\", pre_emphasis:", "normalized: bool = False, pre_emphasis: float = 0, onesided: bool", "periodic=periodic)**0.5 
if wnd not in [\"bartlett\", \"hann\", \"hamm\", \"blackman\", \"rect\",", "window type: {wnd}\") wnd_tpl = { \"sqrthann\": sqrthann, \"hann\": th.hann_window,", "Base layer for (i)STFT Args: frame_len: length of the frame", "\"\"\" if lctx + rctx == 0: return feats if", "raise ValueError( f\"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}\")", "\"\"\" Accept phase & magnitude and output raw waveform Args", "real = real[..., :num_bins, :] imag = imag[..., :num_bins, :]", "+ f\"kernel_size={self.num_bins}x{self.K.shape[2]}\") def num_frames(self, wav_len: th.Tensor) -> th.Tensor: \"\"\" Compute", "th.cat([real, imag], dim=1) # N x 1 x T s", "\"\"\" Return window coefficient Args: wnd: window name frame_len: length", "= K[:frame_len] if inverse and not normalized: # to make", "frames[:, 1:] - pre_emphasis * frames[:, :-1] # 1 x", "typing import Optional, Union, Tuple def init_window(wnd: str, frame_len: int)", "= None, sr: int = 16000, num_mels: int = 80,", "th.Tensor: \"\"\" Compute number of the frames \"\"\" if th.sum(wav_len", "Args: feats (Tensor): N x ... 
x T x F,", "Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" Accept (single or multiple channel) raw", "1:] - pre_emphasis * frames[:, :-1] # 1 x 2B", "float = 0, normalized: bool = False, onesided: bool =", "x FD splice = th.cat(ctx, -1) else: # N x", "= 256, onesided: bool = False, center: bool = False)", "real, polar) window: window name center: center flag (similar with", "matrix: N x B x T real = th.cat([real, real[:,", "min(src_sr, dst_sr) * cutoff_ratio padding = 1 + int(num_zeros /", "normalized audio samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1 x W", "pair real: return [real; imag] Tensor frame_hop: frame hop size", "D splice = th.stack(ctx, -1) return splice def _forward_stft( wav:", "reverse = range(kernel.shape[0] // 4 - 1, 0, -1) #", "None] / float(src_sr) - np.arange(2 * padding + 1)[None, None,", "else: device = transform[0].device K, w = init_kernel(frame_len, frame_hop, init_window(window,", "to STFT layer Args: wav: source audio signal frame_len: length", "pack: N x 2B x T packed = th.cat([real, imag],", "dst_sr = dst_sr // gcd if src_sr == 1 or", "raw waveform and output magnitude and phase Args wav (Tensor)", "(single or multiple channel) raw waveform and output magnitude and", "th.Tensor, output: str = \"polar\", pre_emphasis: float = 0, frame_hop:", "= filters.mel(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm=\"slaney\" if norm", "feats.shape[-2] T = T - T % subsampling_factor for c", "feature splicing: {op}\") # [N x ... 
x T x", "raise RuntimeError( f\"Audio samples less than frame_len ({self.frame_len})\") kernel_size =", "window * zeros_per_block / float(src_sr) return th.tensor(weight, dtype=th.float32) def splice_feature(feats:", "length of the frame round_pow_of_two: if true, choose round(#power_of_two) as", "difference on applying window function onesided: output onesided STFT inverse:", "float = 0, onesided: bool = True, inverse: bool =", "th.Tensor, lctx: int = 1, rctx: int = 1, subsampling_factor:", "pad:-pad] norm = norm[..., pad:-pad] s = s / (norm", "/ S, 1) if mode == \"kaldi\": K = K[:frame_len]", "RuntimeError(f\"STFT expect 2D/3D tensor, but got {wav_dim:d}D\") # if N", "normalized: use normalized DFT kernel onesided: output onesided STFT inverse:", "= th.matmul(kernel[:, 0][None, ...], frames) else: packed = tf.conv1d(wav, kernel,", "x T => N x C x 2B x T", "\" + f\"center={self.center}, mode={self.mode}, \" + f\"kernel_size={self.num_bins}x{self.K.shape[2]}\") def num_frames(self, wav_len:", "s / (norm + EPSILON) # N x S s", "th.hann_window, \"hamm\": th.hamming_window, \"blackman\": th.blackman_window, \"bartlett\": th.bartlett_window, \"rect\": th.ones }", "lctx: left context rctx: right context subsampling_factor: subsampling factor op:", "= \"complex\", window: str = \"sqrthann\", round_pow_of_two: bool = True,", "kernel_size) // self.frame_hop + 1 def extra_repr(self) -> str: return", "raise ValueError(f\"Unsupported mode: {mode}\") # FFT points B = 2**math.ceil(math.log2(frame_len))", "th.Tensor) -> th.Tensor: \"\"\" Compute number of the frames \"\"\"", "th.hann_window(frame_len, periodic=periodic)**0.5 if wnd not in [\"bartlett\", \"hann\", \"hamm\", \"blackman\",", "+ rctx == 0: return feats if op not in", "perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample rate of the", "/ float(src_sr) - np.arange(2 * padding + 1)[None, None, :]", "None, norm: bool = False) -> th.Tensor: \"\"\" 
Return mel", "# NC x 2B x T => N x C", "= init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=inverse, mode=mode) self.K", "have centered frames Return: transform (Tensor or [Tensor, Tensor]), STFT", "torch.nn as nn import torch.nn.functional as tf import librosa.filters as", "format: {input}\") if input == \"real\": real, imag = transform[...,", "_inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], kernel: th.Tensor, window: th.Tensor, input: str", "int, cutoff_ratio: float = 0.95, num_zeros: int = 64) ->", "# 2B x 1 x W K = th.reshape(K, (B", "between frames input: input format (complex, real, polar) window: window", "W I = th.eye(window.shape[0], device=win.device)[:, None] # 1 x 1", "onesided: bool = False, center: bool = False) -> Union[th.Tensor,", "in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output format: {output}\") if", "op: str = \"cat\") -> th.Tensor: \"\"\" Splice feature Args:", "<= self.frame_len): raise RuntimeError( f\"Audio samples less than frame_len ({self.frame_len})\")", "bool = False, center: bool = False) -> th.Tensor: \"\"\"", "lpad = (B - frame_len) // 2 window = tf.pad(window,", "STFT transform results \"\"\" wav_dim = wav.dim() if output not", "2D/3D tensor, but got {wav_dim:d}D\") # if N x S,", "Splice feature Args: feats (Tensor): N x ... 
x T", "(real, imag) pair real: return [real; imag] Tensor frame_hop: frame", "= tf.unfold(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop, padding=0) frames[:, 1:] =", "- frame_len) // 2 window = tf.pad(window, (lpad, B -", "output Return s (Tensor), N x S \"\"\" return _inverse_stft(transform,", "/ (norm + EPSILON) # N x S s =", "-1) # extend matrix: N x B x T real", "K = K[:frame_len] if inverse and not normalized: # to", "I S = B**0.5 else: S = 1 I =", "wav (Tensor), N x S \"\"\" if input not in", "[Tensor, Tensor]), STFT output Return s (Tensor), N x S", "pre_emphasis: float = 0, onesided: bool = True, inverse: bool", "[Tensor, Tensor]), N x (C) x F x T \"\"\"", "int = 80, fmin: float = 0.0, fmax: Optional[float] =", "str = \"sqrthann\", round_pow_of_two: bool = True, normalized: bool =", "Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], kernel: th.Tensor, window: th.Tensor, input: str =", "RuntimeError(f\"Expect 2D/3D tensor, but got {imag_dim}D\") # if F x", "phase & magnitude and output raw waveform Args transform (Tensor", "2.0 (http://www.apache.org/licenses/LICENSE-2.0) import math import numpy as np import torch", "requires_grad=False) self.w = nn.Parameter(w, requires_grad=False) self.frame_len = frame_len self.frame_hop =", "to iSTFT layer Args: transform: results of STFT frame_len: length", "kernel onesided: output onesided STFT inverse: using iDFT kernel (for", "points if num_bins is None: N = 2**math.ceil( math.log2(frame_len)) if", "(Tensor or [Tensor, Tensor]), STFT transform results \"\"\" wav_dim =", "N x (C) x S Return transform (Tensor or [Tensor,", "import torch.nn.functional as tf import librosa.filters as filters from aps.const", "# refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1 x W x T win", "Args transform (Tensor or [Tensor, Tensor]), STFT output Return s", "F x T if imag_dim == 2: real = th.unsqueeze(real,", "\"\"\" Return mel filter coefficients Args: 
frame_len: length of the", "# 1 x 2B x W, NC x W x", "transform (Tensor or [Tensor, Tensor]), STFT transform results kernel (Tensor),", "of the target signal Return: weight (Tensor): coefficients of the", "= False, mode: str = \"librosa\") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:", "window name frame_len: length of the frame \"\"\" def sqrthann(frame_len,", "if onesided: num_bins = kernel.shape[0] // 4 + 1 real", "None] # 1 x 1 x T norm = tf.conv_transpose1d(win**2,", "use normalized DFT kernel onesided: output onesided STFT mode: \"kaldi\"|\"librosa\",", "\"hann\", \"hamm\", \"blackman\", \"rect\", \"sqrthann\"]: raise RuntimeError(f\"Unknown window type: {wnd}\")", "1 x F x T if imag_dim == 2: real", "1 x S+2P if center: pad = kernel.shape[-1] // 2", "not in [\"bartlett\", \"hann\", \"hamm\", \"blackman\", \"rect\", \"sqrthann\"]: raise RuntimeError(f\"Unknown", "Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT inner function Args: wav (Tensor),", "is None: N = 2**math.ceil( math.log2(frame_len)) if round_pow_of_two else frame_len", "B x T real, imag = th.chunk(packed, 2, dim=-2) #", "transform (Tensor or [Tensor, Tensor]), N x (C) x F", "padding * math.pi)) weight = np.sinc( times * zeros_per_block) *", "Hz) fmax: highest frequency (in Hz) norm: normalize the mel", "True, normalized: bool = False, inverse: bool = False, mode:", "float(src_sr) - np.arange(2 * padding + 1)[None, None, :] +", "256, onesided: bool = False, center: bool = False) ->", "x B x T real = th.cat([real, real[:, reverse]], 1)", "range(kernel.shape[0] // 4 - 1, 0, -1) # extend matrix:", "Accept (single or multiple channel) raw waveform and output magnitude", "= False, pre_emphasis: float = 0, onesided: bool = True,", "- 1, 0, -1) # extend matrix: N x B", "transform results kernel (Tensor), STFT transform kernels, from init_kernel(...) 
input", "= wnd_tpl[wnd](frame_len) return c def init_kernel(frame_len: int, frame_hop: int, window:", "normalized=normalized, inverse=True, mode=mode) return _inverse_stft(transform, K.to(device), w.to(device), input=input, frame_hop=frame_hop, onesided=onesided,", "return (wav_len - kernel_size) // self.frame_hop + 1 def extra_repr(self)", "and phase Args wav (Tensor) input signal, N x (C)", "Layer \"\"\" def __init__(self, *args, **kwargs): super(STFT, self).__init__(*args, inverse=False, **kwargs)", "slight difference on applying window function \"\"\" K, _ =", "mode: \"kaldi\"|\"librosa\", slight difference on applying window function \"\"\" K,", "K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=inverse,", "f\"kernel_size={self.num_bins}x{self.K.shape[2]}\") def num_frames(self, wav_len: th.Tensor) -> th.Tensor: \"\"\" Compute number", "x 2B x W, NC x W x T, NC", "signal dst_sr: sample rate of the target signal Return: weight", "normalized=normalized, inverse=False, mode=mode) return _forward_stft(wav, K.to(wav.device), output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided,", "feats (Tensor): N x ... 
x T x F, original", "th.transpose(K, 0, 2) * window # 2B x 1 x", "padding + 1)[None, None, :] + padding) window = np.heaviside(1", "round_pow_of_two: if true, choose round(#power_of_two) as the FFT size normalized:", "T packed = th.matmul(kernel[:, 0][None, ...], frames) else: packed =", "(0.5 + 0.5 * np.cos(times / padding * math.pi)) weight", "layer Args: transform: results of STFT frame_len: length of the", "FFT bins center: if true, we assumed to have centered", "if norm else None) # num_mels x (N // 2", "window = np.heaviside(1 - np.abs(times / padding), 0.0) * (0.5", "Args: wav: source audio signal frame_len: length of the frame", "+ 1 real = real[..., :num_bins, :] imag = imag[...,", "return (mag, pha) def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], kernel: th.Tensor,", "imag = imag[..., :num_bins, :] if output == \"complex\": return", "round(#power_of_two) as the FFT size normalized: return normalized DFT matrix", "x 1 x T norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0)", "(B - frame_len) // 2 window = tf.pad(window, (lpad, B", "gcd dst_sr = dst_sr // gcd if src_sr == 1", "1, subsampling_factor: int = 1, op: str = \"cat\") ->", "kernel.shape[-1] // 2 s = s[..., pad:-pad] norm = norm[...,", "than frame_len ({self.frame_len})\") kernel_size = self.K.shape[-1] if self.center: wav_len +=", "fmax = min(fmax + freq_upper if fmax < 0 else", "(i)STFT Args: frame_len: length of the frame frame_hop: hop size", "(Tensor): coefficients of the filter \"\"\" if src_sr == dst_sr:", "x 2B x T => N x C x 2B", "th.repeat_interleave(window[None, ..., None], packed.shape[-1], dim=-1) # W x 1 x", "= min(fmax + freq_upper if fmax < 0 else fmax,", "I K = K / B # 2 x B", "= th.transpose(K, 0, 2) * window # 2B x 1", "x S Return transform (Tensor or [Tensor, Tensor]), N x", "number samples pre_emphasis: factor of preemphasis onesided: return half FFT", "f\"window={window}, stride={frame_hop}, onesided={onesided}, \" + 
f\"pre_emphasis={self.pre_emphasis}, normalized={normalized}, \" + f\"center={self.center},", "match with librosa c = wnd_tpl[wnd](frame_len, periodic=True) else: c =", "\"\"\" Base layer for (i)STFT Args: frame_len: length of the", "else: fmax = min(fmax + freq_upper if fmax < 0", "onesided: bool = True, inverse: bool = False, center: bool", "frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis, onesided=self.onesided, center=self.center) class iSTFT(STFTBase): \"\"\" Inverse Short-time Fourier", "s[..., pad:-pad] norm = norm[..., pad:-pad] s = s /", "frame frame_hop: hop size between frames window: window name round_pow_of_two:", "frame_len: length of the frame \"\"\" def sqrthann(frame_len, periodic=True): return", "\"rect\", \"sqrthann\"]: raise RuntimeError(f\"Unknown window type: {wnd}\") wnd_tpl = {", "= th.atan2(imag, real) return (mag, pha) def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor,", "size num_bins: number of the frequency bins produced by STFT", "= transform[0] * th.cos(transform[1]) imag = transform[0] * th.sin(transform[1]) else:", "return _forward_stft(wav, self.K, output=output, frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis, onesided=self.onesided, center=self.center) class iSTFT(STFTBase):", "> 0: # NC x W x T frames =", "frames[:, :-1] # 1 x 2B x W, NC x", "frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1]", "hop size between frames window: window name center: center flag", "normalized: use normalized DFT kernel onesided: output onesided STFT mode:", "= imag.dim() if imag_dim not in [2, 3]: raise RuntimeError(f\"Expect", "= th.chunk(packed, 2, dim=-2) # N x (C) x B/2+1", "hop size between frames input: input format (complex, real, polar)", "STFT layer Args: wav: source audio signal frame_len: length of", "NC x W x T, NC x 2B x T", "flag (similar with that in librosa.stft) round_pow_of_two: if true, choose", "K.shape[-1])) return K, window def mel_filter(frame_len: int, round_pow_of_two: bool =", "not 
in [\"cat\", \"stack\"]: raise ValueError(f\"Unknown op for feature splicing:", "onesided: output onesided STFT mode: \"kaldi\"|\"librosa\", slight difference on applying", "// 2 if fmax is None: fmax = freq_upper else:", "= 0.95, num_zeros: int = 64) -> th.Tensor: \"\"\" Return", "sqrthann(frame_len, periodic=True): return th.hann_window(frame_len, periodic=periodic)**0.5 if wnd not in [\"bartlett\",", "/ zeros_per_block) # dst_sr x src_sr x K times =", "+ padding) window = np.heaviside(1 - np.abs(times / padding), 0.0)", "frames output: output type (complex, real, polar) window: window name", "np.cos(times / padding * math.pi)) weight = np.sinc( times *", "wav = tf.pad(wav, (pad, pad), mode=\"reflect\") # STFT if pre_emphasis", "str = \"complex\", window: str = \"sqrthann\", round_pow_of_two: bool =", "number of the mel bands fmin: lowest frequency (in Hz)", "dst_sr == 1: raise ValueError(\"do not support integer downsample/upsample\") zeros_per_block", "import torch as th import torch.nn as nn import torch.nn.functional", "frame_len: lpad = (B - frame_len) // 2 window =", "context rctx: right context subsampling_factor: subsampling factor op: operator on", "the frame \"\"\" def sqrthann(frame_len, periodic=True): return th.hann_window(frame_len, periodic=periodic)**0.5 if", "\"sqrthann\", round_pow_of_two: bool = True, normalized: bool = False, pre_emphasis:", "for c in range(-lctx, rctx + 1): idx = th.arange(c,", "not normalized: # to make K^H * K = I", "math import numpy as np import torch as th import", "x S, reshape N x 1 x S # else:", "zeros_per_block) * window * zeros_per_block / float(src_sr) return th.tensor(weight, dtype=th.float32)", "tensor, but got {imag_dim}D\") # if F x T, reshape", "0], transform[..., 1] elif input == \"polar\": real = transform[0]", "F x T \"\"\" return _forward_stft(wav, self.K, output=output, frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis,", "1 def extra_repr(self) -> str: return self.expr class STFT(STFTBase): 
\"\"\"", "center: pad = kernel.shape[-1] // 2 s = s[..., pad:-pad]", "mode: str = \"librosa\") -> th.Tensor: \"\"\" iSTFT function implementation,", "N x ... x T x F, original feature lctx:", "else: S = 1 I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1)", "transform[0] * th.cos(transform[1]) imag = transform[0] * th.sin(transform[1]) else: real,", "wav: th.Tensor, kernel: th.Tensor, output: str = \"polar\", pre_emphasis: float", "... x T x FD splice = th.cat(ctx, -1) else:", "center: if true, we assumed to have centered frames Return:", "transform kernels, from init_kernel(...) input (str), input format: polar: return", "return iDFT matrix mode: framing mode (librosa or kaldi) \"\"\"", "zeros_per_block / float(src_sr) return th.tensor(weight, dtype=th.float32) def splice_feature(feats: th.Tensor, lctx:", "implementation, equals to STFT layer Args: wav: source audio signal", "ctx.append(th.index_select(feats, -2, idx)) if op == \"cat\": # N x", "x (C) x S kernel (Tensor), STFT transform kernels, from", "padding=0) # NC x 2B x T => N x", "Short-time Fourier Transform as a Layer \"\"\" def __init__(self, *args,", "normalized DFT kernel onesided: output onesided STFT mode: \"kaldi\"|\"librosa\", slight", "-> str: return self.expr class STFT(STFTBase): \"\"\" Short-time Fourier Transform", "= feats.shape[-2] T = T - T % subsampling_factor for", "zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio padding = 1 +", "int) -> th.Tensor: \"\"\" Return window coefficient Args: wnd: window", "real: return [real; imag] Tensor frame_hop: frame hop size in", "DFT matrix inverse: return iDFT matrix mode: framing mode (librosa", "S \"\"\" return _inverse_stft(transform, self.K, self.w, input=input, frame_hop=self.frame_hop, onesided=self.onesided, center=self.center)", "2, 1, K.shape[-1])) return K, window def mel_filter(frame_len: int, round_pow_of_two:", "FD splice = th.cat(ctx, -1) else: # N x ...", "return (real, imag) elif output == \"real\": return th.stack([real, imag],", 
"import librosa.filters as filters from aps.const import EPSILON from typing", "iDFT kernel (for iSTFT) mode: \"kaldi\"|\"librosa\", slight difference on applying", "Transform as a Layer \"\"\" def __init__(self, *args, **kwargs): super(STFT,", "return half FFT bins center: if true, we assumed to", "if mode == \"librosa\" and B != frame_len: lpad =", "math.pi)) weight = np.sinc( times * zeros_per_block) * window *", "choose round(#power_of_two) as the FFT size normalized: return normalized DFT", "-> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT inner function Args: wav", "onesided self.pre_emphasis = pre_emphasis self.center = center self.mode = mode", "= True, inverse: bool = False, center: bool = False,", "\"sqrthann\", round_pow_of_two: bool = True, pre_emphasis: float = 0, normalized:", "\"\"\" def __init__(self, *args, **kwargs): super(iSTFT, self).__init__(*args, inverse=True, **kwargs) def", "bins produced by STFT num_mels: number of the mel bands", "1 x 2B x W, NC x W x T,", "mel bands fmin: lowest frequency (in Hz) fmax: highest frequency", "size between frames window: window name center: center flag (similar", "0.0) * (0.5 + 0.5 * np.cos(times / padding *", "+ 1 def extra_repr(self) -> str: return self.expr class STFT(STFTBase):", "onesided={onesided}, \" + f\"pre_emphasis={self.pre_emphasis}, normalized={normalized}, \" + f\"center={self.center}, mode={self.mode}, \"", "T packed = th.cat([real, imag], dim=1) # N x 1", "difference on applying window function \"\"\" if isinstance(transform, th.Tensor): device", "length of the frame frame_hop: hop size between frames window:", "src_sr // gcd dst_sr = dst_sr // gcd if src_sr", "B # 2 x B x W K = th.transpose(K,", "FFT size normalized: use normalized DFT kernel pre_emphasis: factor of", "tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0) if center: pad = kernel.shape[-1] //", "K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, 
normalized=normalized, inverse=True,", "feature context Return: splice (Tensor): feature with context padded \"\"\"", "size pre_emphasis: factor of preemphasis normalized: use normalized DFT kernel", "\"\"\" Inverse Short-time Fourier Transform as a Layer \"\"\" def", "bool = False, center: bool = False, mode=\"librosa\") -> None:", "input: input format (complex, real, polar) window: window name center:", "Args: frame_len: length of the frame round_pow_of_two: if true, choose", "\"complex\", window: str = \"sqrthann\", round_pow_of_two: bool = True, pre_emphasis:", "\"sqrthann\"]: raise RuntimeError(f\"Unknown window type: {wnd}\") wnd_tpl = { \"sqrthann\":", "th.Tensor]], frame_len: int, frame_hop: int, input: str = \"complex\", window:", "iDFT matrix mode: framing mode (librosa or kaldi) \"\"\" if", "S) # NC x 1 x S+2P if center: pad", "center: bool = False, mode: str = \"librosa\") -> Union[th.Tensor,", "raise ValueError(f\"Unknown output format: {output}\") if wav_dim not in [2,", "\"\"\" if th.sum(wav_len <= self.frame_len): raise RuntimeError( f\"Audio samples less", "pre_emphasis: factor of preemphasis mode: \"kaldi\"|\"librosa\", slight difference on applying", "isinstance(transform, th.Tensor): device = transform.device else: device = transform[0].device K,", "feature Args: feats (Tensor): N x ... 
x T x", "if pre_emphasis > 0: # NC x W x T", "half FFT bins center: used in _forward_stft Return: wav (Tensor),", "onesided: return half FFT bins center: used in _forward_stft Return:", "# N x S s = s.squeeze(1) return s def", "== \"polar\": real = transform[0] * th.cos(transform[1]) imag = transform[0]", "# (N) x F x T imag_dim = imag.dim() if", "downsample/upsample\") zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio padding = 1", "if inverse and not normalized: # to make K^H *", "bool = True, normalized: bool = False, pre_emphasis: float =", "({self.frame_len})\") kernel_size = self.K.shape[-1] if self.center: wav_len += kernel_size return", "= None, norm: bool = False) -> th.Tensor: \"\"\" Return", "pre_emphasis: factor of preemphasis onesided: return half FFT bins center:", "False, center: bool = False) -> th.Tensor: \"\"\" iSTFT inner", "onesided: num_bins = kernel.shape[0] // 4 + 1 real =", "frame_len ({self.frame_len})\") kernel_size = self.K.shape[-1] if self.center: wav_len += kernel_size", "applying window function onesided: output onesided STFT inverse: using iDFT", "x T imag_dim = imag.dim() if imag_dim not in [2,", "x (C) x B/2+1 x T if onesided: num_bins =", "center: bool = False, mode=\"librosa\") -> None: super(STFTBase, self).__init__() K,", "from init_kernel(...) 
input (str), input format: polar: return (magnitude, phase)", "= transform[0].device K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two,", "context padded \"\"\" if lctx + rctx == 0: return", "str = \"librosa\") -> th.Tensor: \"\"\" Return STFT kernels Args:", "False, center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\"", "(C) x S kernel (Tensor), STFT transform kernels, from init_kernel(...)", "left context rctx: right context subsampling_factor: subsampling factor op: operator", "hop size between frames output: output type (complex, real, polar)", "N x B x T real = th.cat([real, real[:, reverse]],", "= False) -> th.Tensor: \"\"\" Return mel filter coefficients Args:", "input (str), input format: polar: return (magnitude, phase) pair complex:", "name round_pow_of_two: if true, choose round(#power_of_two) as the FFT size", "else: real, imag = transform # (N) x F x", "-1) return splice def _forward_stft( wav: th.Tensor, kernel: th.Tensor, output:", "// 2 window = tf.pad(window, (lpad, B - frame_len -", "S = 1 I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1) #", "s = s.squeeze(1) return s def forward_stft( wav: th.Tensor, frame_len:", "= \"sqrthann\", round_pow_of_two: bool = True, normalized: bool = False,", "_forward_stft(wav, K.to(wav.device), output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided, center=center) def inverse_stft(transform: Union[th.Tensor,", "dim=-1) else: mag = (real**2 + imag**2 + EPSILON)**0.5 pha", "if op == \"cat\": # N x ... 
x T", "1)[None, None, :] + padding) window = np.heaviside(1 - np.abs(times", "lctx: int = 1, rctx: int = 1, subsampling_factor: int", "wav_dim = wav.dim() if output not in [\"polar\", \"complex\", \"real\"]:", "s.squeeze(1) return s def forward_stft( wav: th.Tensor, frame_len: int, frame_hop:", "output: output type (complex, real, polar) window: window name center:", "raise RuntimeError(f\"Unknown window type: {wnd}\") wnd_tpl = { \"sqrthann\": sqrthann,", "real) return (mag, pha) def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], kernel:", "mode=mode) return _forward_stft(wav, K.to(wav.device), output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided, center=center) def", "x F x T \"\"\" return _forward_stft(wav, self.K, output=output, frame_hop=self.frame_hop,", "kernel.shape[-1]), stride=frame_hop, padding=0) frames[:, 1:] = frames[:, 1:] - pre_emphasis", "1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1] #", "window: window name center: center flag (similar with that in", "the mel filter coefficients \"\"\" # FFT points if num_bins", "f\"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}\") gcd =", "th.Tensor, window: th.Tensor, input: str = \"polar\", frame_hop: int =", "in _forward_stft Return: wav (Tensor), N x S \"\"\" if", "if fmax is None: fmax = freq_upper else: fmax =", "x 1 x S N, S = wav.shape[0], wav.shape[-1] wav", "(Tensor), STFT transform kernels, from init_kernel(...) 
input (str), input format:", "bool = False, inverse: bool = False, mode: str =", "implementation, equals to iSTFT layer Args: transform: results of STFT", "0.0, fmax: Optional[float] = None, norm: bool = False) ->", "subsampling_factor: int = 1, op: str = \"cat\") -> th.Tensor:", "2019 <NAME> # License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import math import", "lpad)) if normalized: # make K^H * K = I", "F x T, reshape 1 x F x T if", "inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], frame_len: int, frame_hop: int, input: str", "Tuple[th.Tensor, th.Tensor]]: \"\"\" Accept (single or multiple channel) raw waveform", "fmax: Optional[float] = None, norm: bool = False) -> th.Tensor:", "3]: raise RuntimeError(f\"Expect 2D/3D tensor, but got {imag_dim}D\") # if", "tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0) # normalized audio samples # refer:", "self.onesided = onesided self.pre_emphasis = pre_emphasis self.center = center self.mode", "x W I = th.eye(window.shape[0], device=win.device)[:, None] # 1 x", "= transform[0] * th.sin(transform[1]) else: real, imag = transform #", "= I K = K / B # 2 x", ":] if output == \"complex\": return (real, imag) elif output", "signal Return: weight (Tensor): coefficients of the filter \"\"\" if", "onesided=self.onesided, center=self.center) class iSTFT(STFTBase): \"\"\" Inverse Short-time Fourier Transform as", "imag = transform[..., 0], transform[..., 1] elif input == \"polar\":", "pre_emphasis=pre_emphasis, onesided=onesided, center=center) def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], frame_len: int,", "x T \"\"\" return _forward_stft(wav, self.K, output=output, frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis, onesided=self.onesided,", "1) * 2 # fmin & fmax freq_upper = sr", "th.Tensor, output: str = \"polar\" ) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:", "times * zeros_per_block) * window * zeros_per_block / float(src_sr) 
return", "self).__init__() K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized,", "1 x T norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0) if", "N x 1 x T s = tf.conv_transpose1d(packed, kernel, stride=frame_hop,", "= tf.pad(window, (lpad, B - frame_len - lpad)) if normalized:", "x C x 2B x T if wav_dim == 3:", "[2, 3]: raise RuntimeError(f\"Expect 2D/3D tensor, but got {imag_dim}D\") #", "{imag_dim}D\") # if F x T, reshape 1 x F", "2: real = th.unsqueeze(real, 0) imag = th.unsqueeze(imag, 0) if", "{wav_dim:d}D\") # if N x S, reshape N x 1", "== \"librosa\" and B != frame_len: lpad = (B -", "{wnd}\") wnd_tpl = { \"sqrthann\": sqrthann, \"hann\": th.hann_window, \"hamm\": th.hamming_window,", "-> None: super(STFTBase, self).__init__() K, w = init_kernel(frame_len, frame_hop, init_window(window,", "# FFT points if num_bins is None: N = 2**math.ceil(", "return _forward_stft(wav, K.to(wav.device), output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided, center=center) def inverse_stft(transform:", "fmax freq_upper = sr // 2 if fmax is None:", "if imag_dim not in [2, 3]: raise RuntimeError(f\"Expect 2D/3D tensor,", "name center: center flag (similar with that in librosa.stft) round_pow_of_two:", "as nn import torch.nn.functional as tf import librosa.filters as filters", "int, frame_hop: int, output: str = \"complex\", window: str =", "False) -> th.Tensor: \"\"\" Return mel filter coefficients Args: frame_len:", "N x S \"\"\" return _inverse_stft(transform, self.K, self.w, input=input, frame_hop=self.frame_hop,", "Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT inner function Args: wav (Tensor), N", "sample rate of the target signal Return: weight (Tensor): coefficients", "= th.unsqueeze(real, 0) imag = th.unsqueeze(imag, 0) if onesided: #", "(Tensor) input signal, N x (C) x S Return transform", "[2, 3]: raise RuntimeError(f\"STFT expect 2D/3D tensor, but got 
{wav_dim:d}D\")", "x S \"\"\" return _inverse_stft(transform, self.K, self.w, input=input, frame_hop=self.frame_hop, onesided=self.onesided,", "* window # 2B x 1 x W K =", "ValueError(f\"Unknown output format: {output}\") if wav_dim not in [2, 3]:", "fmin) # mel filter coefficients mel = filters.mel(sr, N, n_mels=num_mels,", "str: return self.expr class STFT(STFTBase): \"\"\" Short-time Fourier Transform as", "between frames window: window name center: center flag (similar with", "stride=frame_hop, padding=0) if center: pad = kernel.shape[-1] // 2 s", "def splice_feature(feats: th.Tensor, lctx: int = 1, rctx: int =", "= nn.Parameter(w, requires_grad=False) self.frame_len = frame_len self.frame_hop = frame_hop self.onesided", "= 64) -> th.Tensor: \"\"\" Return speed perturb filters, reference:", "= src_sr // gcd dst_sr = dst_sr // gcd if", "= transform[..., 0], transform[..., 1] elif input == \"polar\": real", "stride=frame_hop, padding=0) # NC x 2B x T => N", "== \"cat\": # N x ... x T x FD", "return feats if op not in [\"cat\", \"stack\"]: raise ValueError(f\"Unknown", "N x S \"\"\" if input not in [\"polar\", \"complex\",", "match with librosa wav = tf.pad(wav, (pad, pad), mode=\"reflect\") #", "0.5 * np.cos(times / padding * math.pi)) weight = np.sinc(", "(http://www.apache.org/licenses/LICENSE-2.0) import math import numpy as np import torch as", "num_mels: number of the mel bands fmin: lowest frequency (in", "tf.conv1d(wav, kernel, stride=frame_hop, padding=0) # NC x 2B x T", "+ 0.5 * np.cos(times / padding * math.pi)) weight =", "num_mels x (N // 2 + 1) return th.tensor(mel, dtype=th.float32)", "K / B # 2 x B x W K", "coefficients mel = filters.mel(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm=\"slaney\"", "(Tensor), STFT transform kernels, from init_kernel(...) 
output (str), output format:", "of preemphasis onesided: return half FFT bins center: if true,", "gcd if src_sr == 1 or dst_sr == 1: raise", "device = transform[0].device K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len),", "pre_emphasis * frames[:, :-1] # 1 x 2B x W,", "lowest frequency (in Hz) fmax: highest frequency (in Hz) norm:", "return th.tensor(weight, dtype=th.float32) def splice_feature(feats: th.Tensor, lctx: int = 1,", "__init__(self, frame_len: int, frame_hop: int, window: str = \"sqrthann\", round_pow_of_two:", "} if wnd != \"rect\": # match with librosa c", "(in Hz) fmax: highest frequency (in Hz) norm: normalize the", "window # 2B x 1 x W K = th.reshape(K,", "but got {imag_dim}D\") # if F x T, reshape 1", "K.to(wav.device), output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided, center=center) def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor,", "2B x T packed = th.cat([real, imag], dim=1) # N", "stride=frame_hop, padding=0) frames[:, 1:] = frames[:, 1:] - pre_emphasis *", "return _inverse_stft(transform, K.to(device), w.to(device), input=input, frame_hop=frame_hop, onesided=onesided, center=center) class STFTBase(nn.Module):", "= True, normalized: bool = False, pre_emphasis: float = 0,", "!= frame_len: lpad = (B - frame_len) // 2 window", "if src_sr == dst_sr: raise ValueError( f\"src_sr should not be", "B/2+1 x T if onesided: num_bins = kernel.shape[0] // 4", "NC x W x T frames = tf.unfold(wav[:, None], (1,", "imag = th.unsqueeze(imag, 0) if onesided: # [self.num_bins - 2,", "output format: {input}\") if input == \"real\": real, imag =", "onesided: # [self.num_bins - 2, ..., 1] reverse = range(kernel.shape[0]", "got {imag_dim}D\") # if F x T, reshape 1 x", "the FFT size num_bins: number of the frequency bins produced", "packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1]) # N x (C)", "to have centered frames Return: transform (Tensor or [Tensor, Tensor]),", "2 if fmax is None: 
fmax = freq_upper else: fmax", "init_kernel(...) input (str), input format: polar: return (magnitude, phase) pair", "normalized DFT matrix inverse: return iDFT matrix mode: framing mode", "True, inverse: bool = False, center: bool = False, mode=\"librosa\")", "of preemphasis mode: \"kaldi\"|\"librosa\", slight difference on applying window function", "NC x 2B x T packed = th.matmul(kernel[:, 0][None, ...],", "packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0) # NC x 2B", "int = 1, subsampling_factor: int = 1, op: str =", "tensor, but got {wav_dim:d}D\") # if N x S, reshape", ":num_bins, :] imag = imag[..., :num_bins, :] if output ==", "str = \"polar\", pre_emphasis: float = 0, frame_hop: int =", "= wav.shape[0], wav.shape[-1] wav = wav.view(-1, 1, S) # NC", "= True, pre_emphasis: float = 0, normalized: bool = False,", "= range(kernel.shape[0] // 4 - 1, 0, -1) # extend", "as a Layer \"\"\" def __init__(self, *args, **kwargs): super(iSTFT, self).__init__(*args,", "is None: fmax = freq_upper else: fmax = min(fmax +", "filter coefficients \"\"\" # FFT points if num_bins is None:", "Transform as a Layer \"\"\" def __init__(self, *args, **kwargs): super(iSTFT,", "x S s = s.squeeze(1) return s def forward_stft( wav:", "- np.abs(times / padding), 0.0) * (0.5 + 0.5 *", "frame_hop=frame_hop, onesided=onesided, center=center) class STFTBase(nn.Module): \"\"\" Base layer for (i)STFT", "if needed if mode == \"librosa\" and B != frame_len:", "1 I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1) # W x", "dim=-1) # W x B x 2 K = th.fft(I", "N x ... 
x T x FD splice = th.cat(ctx,", "in [2, 3]: raise RuntimeError(f\"STFT expect 2D/3D tensor, but got", "function \"\"\" K, _ = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two,", "output raw waveform Args transform (Tensor or [Tensor, Tensor]), STFT", "with librosa wav = tf.pad(wav, (pad, pad), mode=\"reflect\") # STFT", "packed = th.cat([real, imag], dim=1) # N x 1 x", "x F, ...] ctx = [] T = feats.shape[-2] T", "frame_hop self.onesided = onesided self.pre_emphasis = pre_emphasis self.center = center", "self.mode = mode self.num_bins = self.K.shape[0] // 4 + 1", "K, window def mel_filter(frame_len: int, round_pow_of_two: bool = True, num_bins:", "def __init__(self, *args, **kwargs): super(iSTFT, self).__init__(*args, inverse=True, **kwargs) def forward(self,", "function implementation, equals to STFT layer Args: wav: source audio", "mel filter coefficients mel = filters.mel(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin,", "* window * zeros_per_block / float(src_sr) return th.tensor(weight, dtype=th.float32) def", "kernel_size = self.K.shape[-1] if self.center: wav_len += kernel_size return (wav_len", "waveform Args transform (Tensor or [Tensor, Tensor]), STFT output Return", "Return: weight (Tensor): coefficients of the filter \"\"\" if src_sr", "nn import torch.nn.functional as tf import librosa.filters as filters from", "\"\"\" iSTFT inner function Args: transform (Tensor or [Tensor, Tensor]),", "Tuple[th.Tensor, th.Tensor]], input: str = \"polar\") -> th.Tensor: \"\"\" Accept", "norm[..., pad:-pad] s = s / (norm + EPSILON) #", "zeros_per_block) # dst_sr x src_sr x K times = (np.arange(dst_sr)[:,", "2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len # center padding window if", "frame_len: int, frame_hop: int, window: str = \"sqrthann\", round_pow_of_two: bool", "float(dst_sr) - np.arange(src_sr)[None, :, None] / float(src_sr) - np.arange(2 *", "Return: splice (Tensor): feature with context 
padded \"\"\" if lctx", "[\"cat\", \"stack\"]: raise ValueError(f\"Unknown op for feature splicing: {op}\") #", "str = \"librosa\") -> th.Tensor: \"\"\" iSTFT function implementation, equals", "Layer \"\"\" def __init__(self, *args, **kwargs): super(iSTFT, self).__init__(*args, inverse=True, **kwargs)", "mel_filter(frame_len: int, round_pow_of_two: bool = True, num_bins: Optional[int] = None,", "int = 1, op: str = \"cat\") -> th.Tensor: \"\"\"", "S \"\"\" if input not in [\"polar\", \"complex\", \"real\"]: raise", "transform: results of STFT frame_len: length of the frame frame_hop:", "packed.shape[-1]) # N x (C) x B x T real,", "// 2 s = s[..., pad:-pad] norm = norm[..., pad:-pad]", "onesided STFT mode: \"kaldi\"|\"librosa\", slight difference on applying window function", "cutoff_ratio: float = 0.95, num_zeros: int = 64) -> th.Tensor:", "* np.cos(times / padding * math.pi)) weight = np.sinc( times", "size normalized: use normalized DFT kernel pre_emphasis: factor of preemphasis", "frame_hop: int, input: str = \"complex\", window: str = \"sqrthann\",", ":-1] # 1 x 2B x W, NC x W", "true, choose round(#power_of_two) as the FFT size pre_emphasis: factor of", "source audio signal frame_len: length of the frame frame_hop: hop", "# N x (C) x B/2+1 x T if onesided:", "matrix mode: framing mode (librosa or kaldi) \"\"\" if mode", "* K = I K = K / B #", "{src_sr}/{dst_sr}\") gcd = math.gcd(src_sr, dst_sr) src_sr = src_sr // gcd", "assumed to have centered frames Return: transform (Tensor or [Tensor,", "bool = False, center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor,", "real[..., :num_bins, :] imag = imag[..., :num_bins, :] if output", "kernel (Tensor), STFT transform kernels, from init_kernel(...) 
output (str), output", "op: operator on feature context Return: splice (Tensor): feature with", "RuntimeError( f\"Audio samples less than frame_len ({self.frame_len})\") kernel_size = self.K.shape[-1]", "preemphasis mode: \"kaldi\"|\"librosa\", slight difference on applying window function onesided:", "sr: int = 16000, num_mels: int = 80, fmin: float", "frame_hop: hop size between frames input: input format (complex, real,", "return s def forward_stft( wav: th.Tensor, frame_len: int, frame_hop: int,", "( f\"window={window}, stride={frame_hop}, onesided={onesided}, \" + f\"pre_emphasis={self.pre_emphasis}, normalized={normalized}, \" +", "from aps.const import EPSILON from typing import Optional, Union, Tuple", "transform[..., 1] elif input == \"polar\": real = transform[0] *", "return th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr: int, dst_sr: int, cutoff_ratio: float", ":] + padding) window = np.heaviside(1 - np.abs(times / padding),", "th.ones } if wnd != \"rect\": # match with librosa", "{mode}\") # FFT points B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else", "init_kernel(...) output (str), output format: polar: return (magnitude, phase) pair", "# pack: N x 2B x T packed = th.cat([real,", "np.arange(2 * padding + 1)[None, None, :] + padding) window", "Compute number of the frames \"\"\" if th.sum(wav_len <= self.frame_len):", "+ T, device=feats.device, dtype=th.int64) idx = th.clamp(idx, min=0, max=T -", "mode: \"kaldi\"|\"librosa\", slight difference on applying window function onesided: output", "less than frame_len ({self.frame_len})\") kernel_size = self.K.shape[-1] if self.center: wav_len", "= th.reshape(K, (B * 2, 1, K.shape[-1])) return K, window", "= th.cat([real, real[:, reverse]], 1) imag = th.cat([imag, -imag[:, reverse]],", "results kernel (Tensor), STFT transform kernels, from init_kernel(...) 
input (str),", "output format: polar: return (magnitude, phase) pair complex: return (real,", "output: str = \"polar\" ) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\"", "T win = th.repeat_interleave(window[None, ..., None], packed.shape[-1], dim=-1) # W", "onesided=onesided, center=center) def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], frame_len: int, frame_hop:", "raw waveform Args transform (Tensor or [Tensor, Tensor]), STFT output", "n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm=\"slaney\" if norm else None) #", "= math.gcd(src_sr, dst_sr) src_sr = src_sr // gcd dst_sr =", "\"polar\", frame_hop: int = 256, onesided: bool = False, center:", "Return speed perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample rate", "None: N = 2**math.ceil( math.log2(frame_len)) if round_pow_of_two else frame_len else:", "function implementation, equals to iSTFT layer Args: transform: results of", "int, input: str = \"complex\", window: str = \"sqrthann\", round_pow_of_two:", "frame_hop: hop size between frames output: output type (complex, real,", "frame_hop: int = 256, onesided: bool = False, center: bool", "context Return: splice (Tensor): feature with context padded \"\"\" if", "(real, imag) elif output == \"real\": return th.stack([real, imag], dim=-1)", "th.Tensor]]: \"\"\" Accept (single or multiple channel) raw waveform and", "self.w = nn.Parameter(w, requires_grad=False) self.frame_len = frame_len self.frame_hop = frame_hop", "of the frame frame_hop: hop size between frames input: input", "else: mag = (real**2 + imag**2 + EPSILON)**0.5 pha =", "transform[0].device K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized,", "F, original feature lctx: left context rctx: right context subsampling_factor:", "center: pad = kernel.shape[-1] // 2 # NOTE: match with", "using iDFT kernel (for 
iSTFT) mode: \"kaldi\"|\"librosa\", slight difference on", "Tuple def init_window(wnd: str, frame_len: int) -> th.Tensor: \"\"\" Return", "input == \"polar\": real = transform[0] * th.cos(transform[1]) imag =", "x T if imag_dim == 2: real = th.unsqueeze(real, 0)", "& fmax freq_upper = sr // 2 if fmax is", "True, pre_emphasis: float = 0, normalized: bool = False, onesided:", "= False, mode: str = \"librosa\") -> th.Tensor: \"\"\" iSTFT", "samples onesided: return half FFT bins center: used in _forward_stft", "B)], dim=-1) # W x B x 2 K =", "if normalized: # make K^H * K = I S", "kernel (for iSTFT) mode: \"kaldi\"|\"librosa\", slight difference on applying window", "= frame_len self.frame_hop = frame_hop self.onesided = onesided self.pre_emphasis =", "frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=True, mode=mode) return _inverse_stft(transform, K.to(device), w.to(device), input=input,", "return th.stack([real, imag], dim=-1) else: mag = (real**2 + imag**2", "K times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) - np.arange(src_sr)[None,", "RuntimeError(f\"Unknown window type: {wnd}\") wnd_tpl = { \"sqrthann\": sqrthann, \"hann\":", "results of STFT frame_len: length of the frame frame_hop: hop", "(Tensor), N x (C) x S kernel (Tensor), STFT transform", "(C) x F x T \"\"\" return _forward_stft(wav, self.K, output=output,", "/ B # 2 x B x W K =", "format: {output}\") if wav_dim not in [2, 3]: raise RuntimeError(f\"STFT", "\"\"\" K, _ = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized,", "= tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0) if center: pad = kernel.shape[-1]", "th.arange(c, c + T, device=feats.device, dtype=th.int64) idx = th.clamp(idx, min=0,", "input: str = \"polar\") -> th.Tensor: \"\"\" Accept phase &", "inner function Args: wav (Tensor), N x (C) x S", "padding=0) frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:,", "== 
\"kaldi\": K = K[:frame_len] if inverse and not normalized:", "if input == \"real\": real, imag = transform[..., 0], transform[...,", "def __init__(self, frame_len: int, frame_hop: int, window: str = \"sqrthann\",", "STFT num_mels: number of the mel bands fmin: lowest frequency", "= th.fft(I / S, 1) if mode == \"kaldi\": K", "int(num_zeros / zeros_per_block) # dst_sr x src_sr x K times", "signal frame_len: length of the frame frame_hop: hop size between", "pre_emphasis: float = 0, normalized: bool = False, onesided: bool", "if lctx + rctx == 0: return feats if op", "samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1 x W x T", "== \"complex\": return (real, imag) elif output == \"real\": return", "Args: transform (Tensor or [Tensor, Tensor]), STFT transform results kernel", "op == \"cat\": # N x ... x T x", "\"\"\" def __init__(self, *args, **kwargs): super(STFT, self).__init__(*args, inverse=False, **kwargs) def", "== 0: return feats if op not in [\"cat\", \"stack\"]:", "min(fmax + freq_upper if fmax < 0 else fmax, freq_upper)", "requires_grad=False) self.frame_len = frame_len self.frame_hop = frame_hop self.onesided = onesided", "# if N x S, reshape N x 1 x", "th.bartlett_window, \"rect\": th.ones } if wnd != \"rect\": # match", "& magnitude and output raw waveform Args transform (Tensor or", "th.unsqueeze(real, 0) imag = th.unsqueeze(imag, 0) if onesided: # [self.num_bins", "def forward_stft( wav: th.Tensor, frame_len: int, frame_hop: int, output: str", "1 x S N, S = wav.shape[0], wav.shape[-1] wav =", "(Tensor), N x S \"\"\" if input not in [\"polar\",", "wav.view(-1, 1, S) # NC x 1 x S+2P if", "\"librosa\") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT function implementation, equals", "bool = False, mode=\"librosa\") -> None: super(STFTBase, self).__init__() K, w", "and output raw waveform Args transform (Tensor or [Tensor, Tensor]),", "import EPSILON from typing 
import Optional, Union, Tuple def init_window(wnd:", "(B * 2, 1, K.shape[-1])) return K, window def mel_filter(frame_len:", "refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1 x W x T win =", "normalized DFT kernel pre_emphasis: factor of preemphasis mode: \"kaldi\"|\"librosa\", slight", "self.frame_len = frame_len self.frame_hop = frame_hop self.onesided = onesided self.pre_emphasis", "self.frame_hop + 1 def extra_repr(self) -> str: return self.expr class", "mode: str = \"librosa\") -> th.Tensor: \"\"\" Return STFT kernels", "x F x D splice = th.stack(ctx, -1) return splice", "w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=inverse, mode=mode)", "iSTFT inner function Args: transform (Tensor or [Tensor, Tensor]), STFT", "frame_hop: int, window: str = \"sqrthann\", round_pow_of_two: bool = True,", "th.Tensor: \"\"\" Accept phase & magnitude and output raw waveform", "mode=mode) self.K = nn.Parameter(K, requires_grad=False) self.w = nn.Parameter(w, requires_grad=False) self.frame_len", "real = th.unsqueeze(real, 0) imag = th.unsqueeze(imag, 0) if onesided:", "super(STFTBase, self).__init__() K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two,", "(mag, pha) def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], kernel: th.Tensor, window:", "filter coefficients mel = filters.mel(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True,", "False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT inner function Args:", "self.K, output=output, frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis, onesided=self.onesided, center=self.center) class iSTFT(STFTBase): \"\"\" Inverse", "0: return feats if op not in [\"cat\", \"stack\"]: raise", "_ = init_kernel(frame_len, frame_hop, init_window(window, frame_len), 
round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False, mode=mode)", "# N x ... x T x F x D", "1, 0, -1) # extend matrix: N x B x", "W K = th.transpose(K, 0, 2) * window # 2B", "tf import librosa.filters as filters from aps.const import EPSILON from", "<NAME> # License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import math import numpy", "format (complex, real, polar) window: window name center: center flag", "-2, idx)) if op == \"cat\": # N x ...", "using iDFT kernel (for iSTFT) \"\"\" def __init__(self, frame_len: int,", "bins center: used in _forward_stft Return: wav (Tensor), N x", "imag], dim=-1) else: mag = (real**2 + imag**2 + EPSILON)**0.5", "= dst_sr // gcd if src_sr == 1 or dst_sr", "N, S = wav.shape[0], wav.shape[-1] wav = wav.view(-1, 1, S)", "\"librosa\" and B != frame_len: lpad = (B - frame_len)", "x ... x T x F, ...] ctx = []", "FFT points B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len #", "x D splice = th.stack(ctx, -1) return splice def _forward_stft(", "round_pow_of_two else frame_len else: N = (num_bins - 1) *", "magnitude and phase Args wav (Tensor) input signal, N x", "not be equal to dst_sr: {src_sr}/{dst_sr}\") gcd = math.gcd(src_sr, dst_sr)", "str = \"librosa\") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT function", "\"cat\": # N x ... 
x T x FD splice", "= 0, normalized: bool = False, onesided: bool = True,", "== dst_sr: raise ValueError( f\"src_sr should not be equal to", "pre_emphasis > 0: # NC x W x T frames", "format: polar: return (magnitude, phase) pair complex: return (real, imag)", "self.frame_hop = frame_hop self.onesided = onesided self.pre_emphasis = pre_emphasis self.center", "if mode not in [\"librosa\", \"kaldi\"]: raise ValueError(f\"Unsupported mode: {mode}\")", "= True, normalized: bool = False, onesided: bool = True,", "frame_len else: N = (num_bins - 1) * 2 #", "= 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len # center padding window", "= norm[..., pad:-pad] s = s / (norm + EPSILON)", "= (np.arange(dst_sr)[:, None, None] / float(dst_sr) - np.arange(src_sr)[None, :, None]", "-> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" Accept (single or multiple channel)", "None], (1, kernel.shape[-1]), stride=frame_hop, padding=0) frames[:, 1:] = frames[:, 1:]", "= th.arange(c, c + T, device=feats.device, dtype=th.int64) idx = th.clamp(idx,", "cutoff_ratio padding = 1 + int(num_zeros / zeros_per_block) # dst_sr", "th.Tensor: \"\"\" Splice feature Args: feats (Tensor): N x ...", "a Layer \"\"\" def __init__(self, *args, **kwargs): super(STFT, self).__init__(*args, inverse=False,", "\"librosa\") -> th.Tensor: \"\"\" Return STFT kernels Args: frame_len: length", "# W x B x 2 K = th.fft(I /", "kernel_size return (wav_len - kernel_size) // self.frame_hop + 1 def", "if F x T, reshape 1 x F x T", "N x 2B x T packed = th.cat([real, imag], dim=1)", "normalized: # make K^H * K = I S =", "**kwargs): super(iSTFT, self).__init__(*args, inverse=True, **kwargs) def forward(self, transform: Union[th.Tensor, Tuple[th.Tensor,", "-> th.Tensor: \"\"\" iSTFT inner function Args: transform (Tensor or", "in [\"librosa\", \"kaldi\"]: raise ValueError(f\"Unsupported mode: {mode}\") # FFT points", "padding) window = np.heaviside(1 - np.abs(times / padding), 0.0) *", "frame_hop, 
init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=True, mode=mode) return _inverse_stft(transform, K.to(device),", "integer downsample/upsample\") zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio padding =", "return self.expr class STFT(STFTBase): \"\"\" Short-time Fourier Transform as a", "= \"polar\", frame_hop: int = 256, onesided: bool = False,", "transform.device else: device = transform[0].device K, w = init_kernel(frame_len, frame_hop,", "framing mode (librosa or kaldi) \"\"\" if mode not in", "frequency (in Hz) fmax: highest frequency (in Hz) norm: normalize", "float = 0, frame_hop: int = 256, onesided: bool =", "of the frequency bins produced by STFT num_mels: number of", "Inverse Short-time Fourier Transform as a Layer \"\"\" def __init__(self,", "imag) elif output == \"real\": return th.stack([real, imag], dim=-1) else:", "x T real = th.cat([real, real[:, reverse]], 1) imag =", "= s / (norm + EPSILON) # N x S", "Tensor]), STFT transform results kernel (Tensor), STFT transform kernels, from", "wnd_tpl[wnd](frame_len, periodic=True) else: c = wnd_tpl[wnd](frame_len) return c def init_kernel(frame_len:", "number of the frequency bins produced by STFT num_mels: number", "window name center: center flag (similar with that in librosa.stft)", "self.K.shape[-1] if self.center: wav_len += kernel_size return (wav_len - kernel_size)", "init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False, mode=mode) return _forward_stft(wav, K.to(wav.device), output=output,", "T x FD splice = th.cat(ctx, -1) else: # N", "N x (C) x F x T \"\"\" return _forward_stft(wav,", "as a Layer \"\"\" def __init__(self, *args, **kwargs): super(STFT, self).__init__(*args,", "transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], input: str = \"polar\") -> th.Tensor:", "between frames window: window name round_pow_of_two: if true, choose round(#power_of_two)", "frame frame_hop: hop size 
between frames input: input format (complex,", "B x W K = th.transpose(K, 0, 2) * window", "= pre_emphasis self.center = center self.mode = mode self.num_bins =", "-> th.Tensor: \"\"\" Accept phase & magnitude and output raw", "half FFT bins center: if true, we assumed to have", "# STFT if pre_emphasis > 0: # NC x W", "dim=-2) # N x (C) x B/2+1 x T if", "super(STFT, self).__init__(*args, inverse=False, **kwargs) def forward( self, wav: th.Tensor, output:", "kernel (Tensor), STFT transform kernels, from init_kernel(...) input (str), input", "window = tf.pad(window, (lpad, B - frame_len - lpad)) if", "3]: raise RuntimeError(f\"STFT expect 2D/3D tensor, but got {wav_dim:d}D\") #", "= transform # (N) x F x T imag_dim =", "import Optional, Union, Tuple def init_window(wnd: str, frame_len: int) ->", "-> th.Tensor: \"\"\" Splice feature Args: feats (Tensor): N x", "= kernel.shape[-1] // 2 s = s[..., pad:-pad] norm =", "as filters from aps.const import EPSILON from typing import Optional,", "target signal Return: weight (Tensor): coefficients of the filter \"\"\"", "between frames output: output type (complex, real, polar) window: window", "frame_len self.frame_hop = frame_hop self.onesided = onesided self.pre_emphasis = pre_emphasis", "output == \"real\": return th.stack([real, imag], dim=-1) else: mag =", "c in range(-lctx, rctx + 1): idx = th.arange(c, c", "th.unsqueeze(imag, 0) if onesided: # [self.num_bins - 2, ..., 1]", "th.Tensor, kernel: th.Tensor, output: str = \"polar\", pre_emphasis: float =", "(librosa or kaldi) \"\"\" if mode not in [\"librosa\", \"kaldi\"]:", "None: super(STFTBase, self).__init__() K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len),", "(lpad, B - frame_len - lpad)) if normalized: # make", "mode: {mode}\") # FFT points B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two", "+ 1)[None, None, :] + padding) window = np.heaviside(1 -", "// 2 # NOTE: match with librosa wav = tf.pad(wav,", "audio signal frame_len: length 
of the frame frame_hop: hop size", "iSTFT(STFTBase): \"\"\" Inverse Short-time Fourier Transform as a Layer \"\"\"", "T frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop, padding=0) frames[:,", "= True, center: bool = False, mode: str = \"librosa\")", "= 1 I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1) # W", "num_bins = kernel.shape[0] // 4 + 1 real = real[...,", "real, imag = transform # (N) x F x T", "coefficients Args: frame_len: length of the frame round_pow_of_two: if true,", "\"stack\"]: raise ValueError(f\"Unknown op for feature splicing: {op}\") # [N", "normalized: # to make K^H * K = I K", "on applying window function onesided: output onesided STFT inverse: using", "the frequency bins produced by STFT num_mels: number of the", "x S kernel (Tensor), STFT transform kernels, from init_kernel(...) output", "# 1 x 1 x T norm = tf.conv_transpose1d(win**2, I,", "= 1, rctx: int = 1, subsampling_factor: int = 1,", "with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as", "round_pow_of_two: bool = True, normalized: bool = False, pre_emphasis: float", "[N x ... x T x F, ...] 
ctx =", "init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=inverse, mode=mode) self.K = nn.Parameter(K, requires_grad=False)", ":num_bins, :] if output == \"complex\": return (real, imag) elif", ") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" Accept (single or multiple", "size between frames input: input format (complex, real, polar) window:", "window coefficient Args: wnd: window name frame_len: length of the", "-> th.Tensor: \"\"\" iSTFT function implementation, equals to iSTFT layer", "wav.shape[0], wav.shape[-1] wav = wav.view(-1, 1, S) # NC x", "norm: bool = False) -> th.Tensor: \"\"\" Return mel filter", "kernel.shape[-1] // 2 # NOTE: match with librosa wav =", "x K times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) -", "if wav_dim == 3: packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1])", "... x T x F, ...] ctx = [] T", "STFT mode: \"kaldi\"|\"librosa\", slight difference on applying window function \"\"\"", "torch as th import torch.nn as nn import torch.nn.functional as", "\"\"\" Accept (single or multiple channel) raw waveform and output", "norm else None) # num_mels x (N // 2 +", "S kernel (Tensor), STFT transform kernels, from init_kernel(...) output (str),", "x T x F, ...] 
ctx = [] T =", "fmax < 0 else fmax, freq_upper) fmin = max(0, fmin)", "iSTFT) mode: \"kaldi\"|\"librosa\", slight difference on applying window function \"\"\"", "center=center) class STFTBase(nn.Module): \"\"\" Base layer for (i)STFT Args: frame_len:", "sqrthann, \"hann\": th.hann_window, \"hamm\": th.hamming_window, \"blackman\": th.blackman_window, \"bartlett\": th.bartlett_window, \"rect\":", "inner function Args: transform (Tensor or [Tensor, Tensor]), STFT transform", "equals to iSTFT layer Args: transform: results of STFT frame_len:", "normalized: return normalized DFT matrix inverse: return iDFT matrix mode:", "x (C) x F x T \"\"\" return _forward_stft(wav, self.K,", "nn.Parameter(w, requires_grad=False) self.frame_len = frame_len self.frame_hop = frame_hop self.onesided =", "x F x T imag_dim = imag.dim() if imag_dim not", "/ padding), 0.0) * (0.5 + 0.5 * np.cos(times /", "frame_hop: int, output: str = \"complex\", window: str = \"sqrthann\",", "packed.view(N, -1, packed.shape[-2], packed.shape[-1]) # N x (C) x B", "self.expr = ( f\"window={window}, stride={frame_hop}, onesided={onesided}, \" + f\"pre_emphasis={self.pre_emphasis}, normalized={normalized},", "(wav_len - kernel_size) // self.frame_hop + 1 def extra_repr(self) ->", "K, _ = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False,", "input format (complex, real, polar) window: window name center: center", "K = K / B # 2 x B x", "(complex, real, polar) window: window name center: center flag (similar", "factor op: operator on feature context Return: splice (Tensor): feature", ":] imag = imag[..., :num_bins, :] if output == \"complex\":", "\"sqrthann\", round_pow_of_two: bool = True, normalized: bool = False, onesided:", "as the FFT size normalized: use normalized DFT kernel onesided:", "B x 2 K = th.fft(I / S, 1) if", "in number samples pre_emphasis: factor of preemphasis onesided: return half", "0, frame_hop: int = 
256, onesided: bool = False, center:", "th.cat([imag, -imag[:, reverse]], 1) # pack: N x 2B x", "th.Tensor: \"\"\" iSTFT function implementation, equals to iSTFT layer Args:", "we assumed to have centered frames Return: transform (Tensor or", "\"\"\" wav_dim = wav.dim() if output not in [\"polar\", \"complex\",", "frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=inverse, mode=mode) self.K = nn.Parameter(K, requires_grad=False) self.w", "wav (Tensor), N x (C) x S kernel (Tensor), STFT", "true, choose round(#power_of_two) as the FFT size num_bins: number of", "packed = th.matmul(kernel[:, 0][None, ...], frames) else: packed = tf.conv1d(wav,", "fmax=fmax, fmin=fmin, htk=True, norm=\"slaney\" if norm else None) # num_mels", "64) -> th.Tensor: \"\"\" Return speed perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py", "frames) else: packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0) # NC", "int, round_pow_of_two: bool = True, num_bins: Optional[int] = None, sr:", "preemphasis normalized: use normalized DFT kernel onesided: output onesided STFT", "= B**0.5 else: S = 1 I = th.stack([th.eye(B), th.zeros(B,", "2B x W, NC x W x T, NC x", "# normalized audio samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1 x", "the mel bands fmin: lowest frequency (in Hz) fmax: highest", "fmax = freq_upper else: fmax = min(fmax + freq_upper if", "frame_hop: hop size between frames window: window name round_pow_of_two: if", "W x B x 2 K = th.fft(I / S,", "+ int(num_zeros / zeros_per_block) # dst_sr x src_sr x K", "Union, Tuple def init_window(wnd: str, frame_len: int) -> th.Tensor: \"\"\"", "np.sinc( times * zeros_per_block) * window * zeros_per_block / float(src_sr)", "or multiple channel) raw waveform and output magnitude and phase", "choose round(#power_of_two) as the FFT size num_bins: number of the", "wav: th.Tensor, 
frame_len: int, frame_hop: int, output: str = \"complex\",", "= frames[:, 1:] - pre_emphasis * frames[:, :-1] # 1", "c def init_kernel(frame_len: int, frame_hop: int, window: str, round_pow_of_two: bool", "if round_pow_of_two else frame_len else: N = (num_bins - 1)", "# dst_sr x src_sr x K times = (np.arange(dst_sr)[:, None,", "difference on applying window function \"\"\" K, _ = init_kernel(frame_len,", "= nn.Parameter(K, requires_grad=False) self.w = nn.Parameter(w, requires_grad=False) self.frame_len = frame_len", "original feature lctx: left context rctx: right context subsampling_factor: subsampling", "= th.clamp(idx, min=0, max=T - 1) ctx.append(th.index_select(feats, -2, idx)) if", "NC x 2B x T => N x C x", "// 4 - 1, 0, -1) # extend matrix: N", "-1, packed.shape[-2], packed.shape[-1]) # N x (C) x B x", "F x T imag_dim = imag.dim() if imag_dim not in", "init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=True, mode=mode) return _inverse_stft(transform,", "frame hop size in number samples onesided: return half FFT", "return half FFT bins center: used in _forward_stft Return: wav", "of the frame \"\"\" def sqrthann(frame_len, periodic=True): return th.hann_window(frame_len, periodic=periodic)**0.5", "B != frame_len: lpad = (B - frame_len) // 2", "th.atan2(imag, real) return (mag, pha) def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],", "int = 1, rctx: int = 1, subsampling_factor: int =", "output onesided STFT inverse: using iDFT kernel (for iSTFT) \"\"\"", "x W K = th.reshape(K, (B * 2, 1, K.shape[-1]))", "float = 0.0, fmax: Optional[float] = None, norm: bool =", "T - T % subsampling_factor for c in range(-lctx, rctx", "# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import math import numpy as", "imag) pair real: return [real; imag] Tensor frame_hop: frame hop", "c + T, device=feats.device, dtype=th.int64) idx = th.clamp(idx, min=0, 
max=T", "periodic=True) else: c = wnd_tpl[wnd](frame_len) return c def init_kernel(frame_len: int,", "{output}\") if wav_dim not in [2, 3]: raise RuntimeError(f\"STFT expect", ":, None] / float(src_sr) - np.arange(2 * padding + 1)[None,", "x W K = th.transpose(K, 0, 2) * window #", "-> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT function implementation, equals to", "# match with librosa c = wnd_tpl[wnd](frame_len, periodic=True) else: c", "produced by STFT num_mels: number of the mel bands fmin:", "be equal to dst_sr: {src_sr}/{dst_sr}\") gcd = math.gcd(src_sr, dst_sr) src_sr", "= ( f\"window={window}, stride={frame_hop}, onesided={onesided}, \" + f\"pre_emphasis={self.pre_emphasis}, normalized={normalized}, \"", "W x 1 x W I = th.eye(window.shape[0], device=win.device)[:, None]", "(real**2 + imag**2 + EPSILON)**0.5 pha = th.atan2(imag, real) return", "# extend matrix: N x B x T real =", "if output == \"complex\": return (real, imag) elif output ==", "S N, S = wav.shape[0], wav.shape[-1] wav = wav.view(-1, 1,", "def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], frame_len: int, frame_hop: int, input:", "\"complex\", \"real\"]: raise ValueError(f\"Unknown output format: {output}\") if wav_dim not", "round_pow_of_two else frame_len # center padding window if needed if", "device=feats.device, dtype=th.int64) idx = th.clamp(idx, min=0, max=T - 1) ctx.append(th.index_select(feats,", "th.Tensor]]: \"\"\" STFT inner function Args: wav (Tensor), N x", "S = wav.shape[0], wav.shape[-1] wav = wav.view(-1, 1, S) #", "imag] Tensor frame_hop: frame hop size in number samples pre_emphasis:", "x ... 
x T x F, original feature lctx: left", "audio samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1 x W x", "frame_hop: frame hop size in number samples pre_emphasis: factor of", "2 # NOTE: match with librosa wav = tf.pad(wav, (pad,", "layer Args: wav: source audio signal frame_len: length of the", "\"\"\" Return speed perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample", "\"\"\" Short-time Fourier Transform as a Layer \"\"\" def __init__(self,", "src_sr = src_sr // gcd dst_sr = dst_sr // gcd", "if true, choose round(#power_of_two) as the FFT size normalized: return", "x T norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0) if center:", "rctx: right context subsampling_factor: subsampling factor op: operator on feature", "int, window: str, round_pow_of_two: bool = True, normalized: bool =", "splice def _forward_stft( wav: th.Tensor, kernel: th.Tensor, output: str =", "output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided, center=center) def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],", "* frames[:, :-1] # 1 x 2B x W, NC", "x ... x T x F x D splice =", "frequency bins produced by STFT num_mels: number of the mel", "if round_pow_of_two else frame_len # center padding window if needed", "1 real = real[..., :num_bins, :] imag = imag[..., :num_bins,", "padding = 1 + int(num_zeros / zeros_per_block) # dst_sr x", "= False, mode: str = \"librosa\") -> th.Tensor: \"\"\" Return", "the frames \"\"\" if th.sum(wav_len <= self.frame_len): raise RuntimeError( f\"Audio", "th.Tensor, frame_len: int, frame_hop: int, output: str = \"complex\", window:", "... 
x T x F x D splice = th.stack(ctx,", "choose round(#power_of_two) as the FFT size normalized: use normalized DFT", "4 - 1, 0, -1) # extend matrix: N x", "input not in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output format:", "normalize the mel filter coefficients \"\"\" # FFT points if", "rctx + 1): idx = th.arange(c, c + T, device=feats.device,", "+ 1): idx = th.arange(c, c + T, device=feats.device, dtype=th.int64)", "= init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False, mode=mode) return", "center=self.center) class iSTFT(STFTBase): \"\"\" Inverse Short-time Fourier Transform as a", "x S \"\"\" if input not in [\"polar\", \"complex\", \"real\"]:", "else fmax, freq_upper) fmin = max(0, fmin) # mel filter", "x F x T if imag_dim == 2: real =", "= False, center: bool = False) -> th.Tensor: \"\"\" iSTFT", "= 2**math.ceil( math.log2(frame_len)) if round_pow_of_two else frame_len else: N =", "+ 1 self.expr = ( f\"window={window}, stride={frame_hop}, onesided={onesided}, \" +", "# center padding window if needed if mode == \"librosa\"", "0: # NC x W x T frames = tf.unfold(wav[:,", "magnitude and output raw waveform Args transform (Tensor or [Tensor,", "x (C) x B x T real, imag = th.chunk(packed,", "elif output == \"real\": return th.stack([real, imag], dim=-1) else: mag", "return splice def _forward_stft( wav: th.Tensor, kernel: th.Tensor, output: str", "feature with context padded \"\"\" if lctx + rctx ==", "frame \"\"\" def sqrthann(frame_len, periodic=True): return th.hann_window(frame_len, periodic=periodic)**0.5 if wnd", "f\"pre_emphasis={self.pre_emphasis}, normalized={normalized}, \" + f\"center={self.center}, mode={self.mode}, \" + f\"kernel_size={self.num_bins}x{self.K.shape[2]}\") def", "num_bins: Optional[int] = None, sr: int = 16000, num_mels: int", "S, 1) if mode == \"kaldi\": K = K[:frame_len] if", "x T if onesided: num_bins = kernel.shape[0] // 4 
+", "T imag_dim = imag.dim() if imag_dim not in [2, 3]:", "* zeros_per_block) * window * zeros_per_block / float(src_sr) return th.tensor(weight,", "imag = th.cat([imag, -imag[:, reverse]], 1) # pack: N x", "norm=\"slaney\" if norm else None) # num_mels x (N //", "max(0, fmin) # mel filter coefficients mel = filters.mel(sr, N,", "= 0, frame_hop: int = 256, onesided: bool = False,", "raise RuntimeError(f\"STFT expect 2D/3D tensor, but got {wav_dim:d}D\") # if", "math.gcd(src_sr, dst_sr) src_sr = src_sr // gcd dst_sr = dst_sr", "NOTE: match with librosa wav = tf.pad(wav, (pad, pad), mode=\"reflect\")", "imag = transform[0] * th.sin(transform[1]) else: real, imag = transform", "extend matrix: N x B x T real = th.cat([real,", "-> th.Tensor: \"\"\" Return STFT kernels Args: frame_len: length of", "K.to(device), w.to(device), input=input, frame_hop=frame_hop, onesided=onesided, center=center) class STFTBase(nn.Module): \"\"\" Base", "T if onesided: num_bins = kernel.shape[0] // 4 + 1", "= th.cat([real, imag], dim=1) # N x 1 x T", "= 16000, num_mels: int = 80, fmin: float = 0.0,", "onesided: return half FFT bins center: if true, we assumed", "T => N x C x 2B x T if", "raise ValueError(\"do not support integer downsample/upsample\") zeros_per_block = min(src_sr, dst_sr)", "* th.sin(transform[1]) else: real, imag = transform # (N) x", "the FFT size normalized: return normalized DFT matrix inverse: return", "wav = wav.view(-1, 1, S) # NC x 1 x", "= \"librosa\") -> th.Tensor: \"\"\" iSTFT function implementation, equals to", "# to make K^H * K = I K =", "s = s[..., pad:-pad] norm = norm[..., pad:-pad] s =", "in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output format: {input}\") if", "self.center: wav_len += kernel_size return (wav_len - kernel_size) // self.frame_hop", "iDFT kernel (for iSTFT) \"\"\" def __init__(self, frame_len: int, frame_hop:", "true, choose round(#power_of_two) as the FFT size normalized: use normalized", "x W x T, NC x 2B x T 
packed", "if center: pad = kernel.shape[-1] // 2 # NOTE: match", "aps.const import EPSILON from typing import Optional, Union, Tuple def", "op not in [\"cat\", \"stack\"]: raise ValueError(f\"Unknown op for feature", "B**0.5 else: S = 1 I = th.stack([th.eye(B), th.zeros(B, B)],", "center=center) def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], frame_len: int, frame_hop: int,", "th.clamp(idx, min=0, max=T - 1) ctx.append(th.index_select(feats, -2, idx)) if op", "1) if mode == \"kaldi\": K = K[:frame_len] if inverse", "0.95, num_zeros: int = 64) -> th.Tensor: \"\"\" Return speed", "return th.hann_window(frame_len, periodic=periodic)**0.5 if wnd not in [\"bartlett\", \"hann\", \"hamm\",", "htk=True, norm=\"slaney\" if norm else None) # num_mels x (N", "librosa wav = tf.pad(wav, (pad, pad), mode=\"reflect\") # STFT if", "# mel filter coefficients mel = filters.mel(sr, N, n_mels=num_mels, fmax=fmax,", "2B x T if wav_dim == 3: packed = packed.view(N,", "[\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output format: {input}\") if input", "def forward( self, wav: th.Tensor, output: str = \"polar\" )", "(C) x S Return transform (Tensor or [Tensor, Tensor]), N", "x T real, imag = th.chunk(packed, 2, dim=-2) # N", "\"\"\" def __init__(self, frame_len: int, frame_hop: int, window: str =", "super(iSTFT, self).__init__(*args, inverse=True, **kwargs) def forward(self, transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],", "inverse=False, mode=mode) return _forward_stft(wav, K.to(wav.device), output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided, center=center)", "else: # N x ... 
x T x F x", "(1, kernel.shape[-1]), stride=frame_hop, padding=0) frames[:, 1:] = frames[:, 1:] -", "Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], frame_len: int, frame_hop: int, input: str =", "x T x FD splice = th.cat(ctx, -1) else: #", "1 x T s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0) #", "str = \"polar\" ) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" Accept", "of the frame frame_hop: hop size between frames output: output", "= kernel.shape[-1] // 2 # NOTE: match with librosa wav", "round_pow_of_two: bool = True, num_bins: Optional[int] = None, sr: int", "wav.dim() if output not in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown", "/ float(src_sr) return th.tensor(weight, dtype=th.float32) def splice_feature(feats: th.Tensor, lctx: int", "False, mode=\"librosa\") -> None: super(STFTBase, self).__init__() K, w = init_kernel(frame_len,", "mode: framing mode (librosa or kaldi) \"\"\" if mode not", "\"\"\" def sqrthann(frame_len, periodic=True): return th.hann_window(frame_len, periodic=periodic)**0.5 if wnd not", "reshape 1 x F x T if imag_dim == 2:", "not support integer downsample/upsample\") zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio", "bool = False, mode: str = \"librosa\") -> th.Tensor: \"\"\"", "(norm + EPSILON) # N x S s = s.squeeze(1)", "src_sr x K times = (np.arange(dst_sr)[:, None, None] / float(dst_sr)", "as the FFT size num_bins: number of the frequency bins", "mode: \"kaldi\"|\"librosa\", slight difference on applying window function \"\"\" if", "N x S, reshape N x 1 x S #", "1 x W K = th.reshape(K, (B * 2, 1,", "+ EPSILON) # N x S s = s.squeeze(1) return", "Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT function implementation, equals to STFT layer", "= wav.dim() if output not in [\"polar\", \"complex\", \"real\"]: raise", "https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample rate of the source signal dst_sr:", "that in librosa.stft) round_pow_of_two: 
if true, choose round(#power_of_two) as the", "onesided STFT inverse: using iDFT kernel (for iSTFT) \"\"\" def", "2 s = s[..., pad:-pad] norm = norm[..., pad:-pad] s", "number of the frames \"\"\" if th.sum(wav_len <= self.frame_len): raise", "device = transform.device else: device = transform[0].device K, w =", "* padding + 1)[None, None, :] + padding) window =", "# N x (C) x B x T real, imag", "(N // 2 + 1) return th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr:", "-imag[:, reverse]], 1) # pack: N x 2B x T", "bool = False, pre_emphasis: float = 0, onesided: bool =", "dst_sr: sample rate of the target signal Return: weight (Tensor):", "Tensor]), N x (C) x F x T \"\"\" return", "points B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len # center", "_forward_stft(wav, self.K, output=output, frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis, onesided=self.onesided, center=self.center) class iSTFT(STFTBase): \"\"\"", "ValueError(\"do not support integer downsample/upsample\") zeros_per_block = min(src_sr, dst_sr) *", "1 x S # else: reshape NC x 1 x", "fmin=fmin, htk=True, norm=\"slaney\" if norm else None) # num_mels x", "{op}\") # [N x ... 
x T x F, ...]", "frequency (in Hz) norm: normalize the mel filter coefficients \"\"\"", "K[:frame_len] if inverse and not normalized: # to make K^H", "..., 1] reverse = range(kernel.shape[0] // 4 - 1, 0,", "Copyright 2019 <NAME> # License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import math", "\"blackman\", \"rect\", \"sqrthann\"]: raise RuntimeError(f\"Unknown window type: {wnd}\") wnd_tpl =", "center self.mode = mode self.num_bins = self.K.shape[0] // 4 +", "source signal dst_sr: sample rate of the target signal Return:", "return (real, imag) pair real: return [real; imag] Tensor frame_hop:", "I = th.eye(window.shape[0], device=win.device)[:, None] # 1 x 1 x", "bool = False, mode: str = \"librosa\") -> Union[th.Tensor, Tuple[th.Tensor,", "th.Tensor]], input: str = \"polar\") -> th.Tensor: \"\"\" Accept phase", "-> th.Tensor: \"\"\" Compute number of the frames \"\"\" if", "th.Tensor]]: \"\"\" STFT function implementation, equals to STFT layer Args:", "S s = s.squeeze(1) return s def forward_stft( wav: th.Tensor,", "def init_window(wnd: str, frame_len: int) -> th.Tensor: \"\"\" Return window", "N x 1 x S # else: reshape NC x", "= 1 + int(num_zeros / zeros_per_block) # dst_sr x src_sr", "layer for (i)STFT Args: frame_len: length of the frame frame_hop:", "np.heaviside(1 - np.abs(times / padding), 0.0) * (0.5 + 0.5", "S+2P if center: pad = kernel.shape[-1] // 2 # NOTE:", "* zeros_per_block / float(src_sr) return th.tensor(weight, dtype=th.float32) def splice_feature(feats: th.Tensor,", "not in [\"librosa\", \"kaldi\"]: raise ValueError(f\"Unsupported mode: {mode}\") # FFT", "torch.nn.functional as tf import librosa.filters as filters from aps.const import", "real = transform[0] * th.cos(transform[1]) imag = transform[0] * th.sin(transform[1])", "x W, NC x W x T, NC x 2B", "of the frames \"\"\" if th.sum(wav_len <= self.frame_len): raise RuntimeError(", "frame round_pow_of_two: if true, choose round(#power_of_two) as the FFT size", "= False) -> 
Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT inner function", "f\"Audio samples less than frame_len ({self.frame_len})\") kernel_size = self.K.shape[-1] if", "x 1 x T s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0)", "th.sin(transform[1]) else: real, imag = transform # (N) x F", "frame frame_hop: hop size between frames window: window name center:", "imag**2 + EPSILON)**0.5 pha = th.atan2(imag, real) return (mag, pha)", "frame_len: int, frame_hop: int, input: str = \"complex\", window: str", "as the FFT size normalized: use normalized DFT kernel pre_emphasis:", "th.reshape(K, (B * 2, 1, K.shape[-1])) return K, window def", "th import torch.nn as nn import torch.nn.functional as tf import", "center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT", "frames Return: transform (Tensor or [Tensor, Tensor]), STFT transform results", "rate of the source signal dst_sr: sample rate of the", "False, mode: str = \"librosa\") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\"", "Optional[float] = None, norm: bool = False) -> th.Tensor: \"\"\"", "wav: source audio signal frame_len: length of the frame frame_hop:", "pre_emphasis: float = 0, frame_hop: int = 256, onesided: bool", "if isinstance(transform, th.Tensor): device = transform.device else: device = transform[0].device", "fmin: float = 0.0, fmax: Optional[float] = None, norm: bool", "if wav_dim not in [2, 3]: raise RuntimeError(f\"STFT expect 2D/3D", "- 1) ctx.append(th.index_select(feats, -2, idx)) if op == \"cat\": #", "x T packed = th.cat([real, imag], dim=1) # N x", "th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr: int, dst_sr: int, cutoff_ratio: float =", "weight (Tensor): coefficients of the filter \"\"\" if src_sr ==", "center flag (similar with that in librosa.stft) round_pow_of_two: if true,", "T, reshape 1 x F x T if imag_dim ==", "\"\"\" Splice feature Args: feats (Tensor): N x ... 
x", "False, mode: str = \"librosa\") -> th.Tensor: \"\"\" iSTFT function", "right context subsampling_factor: subsampling factor op: operator on feature context", "// 4 + 1 self.expr = ( f\"window={window}, stride={frame_hop}, onesided={onesided},", "= 0.0, fmax: Optional[float] = None, norm: bool = False)", "1) return th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr: int, dst_sr: int, cutoff_ratio:", "(Tensor): N x ... x T x F, original feature", "max=T - 1) ctx.append(th.index_select(feats, -2, idx)) if op == \"cat\":", "wnd: window name frame_len: length of the frame \"\"\" def", "= (num_bins - 1) * 2 # fmin & fmax", "x (C) x S Return transform (Tensor or [Tensor, Tensor]),", "= packed.view(N, -1, packed.shape[-2], packed.shape[-1]) # N x (C) x", "[] T = feats.shape[-2] T = T - T %", "output onesided STFT inverse: using iDFT kernel (for iSTFT) mode:", "bool = True, pre_emphasis: float = 0, normalized: bool =", "center: center flag (similar with that in librosa.stft) round_pow_of_two: if", "(pad, pad), mode=\"reflect\") # STFT if pre_emphasis > 0: #", "th.stack([th.eye(B), th.zeros(B, B)], dim=-1) # W x B x 2", "= freq_upper else: fmax = min(fmax + freq_upper if fmax", "S Return transform (Tensor or [Tensor, Tensor]), N x (C)", "* math.pi)) weight = np.sinc( times * zeros_per_block) * window", "should not be equal to dst_sr: {src_sr}/{dst_sr}\") gcd = math.gcd(src_sr,", "if input not in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output", "STFT transform kernels, from init_kernel(...) 
output (str), output format: polar:", "Return s (Tensor), N x S \"\"\" return _inverse_stft(transform, self.K,", "hop size in number samples onesided: return half FFT bins", "periodic=True): return th.hann_window(frame_len, periodic=periodic)**0.5 if wnd not in [\"bartlett\", \"hann\",", "def sqrthann(frame_len, periodic=True): return th.hann_window(frame_len, periodic=periodic)**0.5 if wnd not in", "extra_repr(self) -> str: return self.expr class STFT(STFTBase): \"\"\" Short-time Fourier", "= False, inverse: bool = False, mode: str = \"librosa\")", "int = 16000, num_mels: int = 80, fmin: float =", "1 x W I = th.eye(window.shape[0], device=win.device)[:, None] # 1", "speed_perturb_filter(src_sr: int, dst_sr: int, cutoff_ratio: float = 0.95, num_zeros: int", "= self.K.shape[0] // 4 + 1 self.expr = ( f\"window={window},", "EPSILON) # N x S s = s.squeeze(1) return s", "(Tensor or [Tensor, Tensor]), STFT transform results kernel (Tensor), STFT", "imag = th.chunk(packed, 2, dim=-2) # N x (C) x", "float = 0.95, num_zeros: int = 64) -> th.Tensor: \"\"\"", "str = \"sqrthann\", round_pow_of_two: bool = True, pre_emphasis: float =", "C x 2B x T if wav_dim == 3: packed", "= np.heaviside(1 - np.abs(times / padding), 0.0) * (0.5 +", "reverse]], 1) # pack: N x 2B x T packed", "dst_sr: raise ValueError( f\"src_sr should not be equal to dst_sr:", "bool = True, center: bool = False, mode: str =", "kernel, stride=frame_hop, padding=0) # NC x 2B x T =>", "Tensor frame_hop: frame hop size in number samples onesided: return", "coefficients \"\"\" # FFT points if num_bins is None: N", "mode=mode) return _inverse_stft(transform, K.to(device), w.to(device), input=input, frame_hop=frame_hop, onesided=onesided, center=center) class", "length of the frame frame_hop: hop size between frames input:", "pad = kernel.shape[-1] // 2 # NOTE: match with librosa", "1) imag = th.cat([imag, -imag[:, reverse]], 1) # pack: N", "N x ... 
x T x F x D splice", "True, center: bool = False, mode: str = \"librosa\") ->", "inverse=True, mode=mode) return _inverse_stft(transform, K.to(device), w.to(device), input=input, frame_hop=frame_hop, onesided=onesided, center=center)", "def init_kernel(frame_len: int, frame_hop: int, window: str, round_pow_of_two: bool =", "normalized: bool = False, inverse: bool = False, mode: str", "forward_stft( wav: th.Tensor, frame_len: int, frame_hop: int, output: str =", "the FFT size normalized: use normalized DFT kernel onesided: output", "FFT bins center: used in _forward_stft Return: wav (Tensor), N", "= onesided self.pre_emphasis = pre_emphasis self.center = center self.mode =", "round(#power_of_two) as the FFT size pre_emphasis: factor of preemphasis normalized:", "STFT output Return s (Tensor), N x S \"\"\" return", "dtype=th.float32) def speed_perturb_filter(src_sr: int, dst_sr: int, cutoff_ratio: float = 0.95,", "\"cat\") -> th.Tensor: \"\"\" Splice feature Args: feats (Tensor): N", "if true, choose round(#power_of_two) as the FFT size num_bins: number", "applying window function \"\"\" K, _ = init_kernel(frame_len, frame_hop, init_window(window,", "[\"librosa\", \"kaldi\"]: raise ValueError(f\"Unsupported mode: {mode}\") # FFT points B", "kernels Args: frame_len: length of the frame frame_hop: hop size", "(Tensor): feature with context padded \"\"\" if lctx + rctx", "- 2, ..., 1] reverse = range(kernel.shape[0] // 4 -", "__init__(self, *args, **kwargs): super(STFT, self).__init__(*args, inverse=False, **kwargs) def forward( self,", "False) -> th.Tensor: \"\"\" iSTFT inner function Args: transform (Tensor", "of the mel bands fmin: lowest frequency (in Hz) fmax:", "< 0 else fmax, freq_upper) fmin = max(0, fmin) #", "else None) # num_mels x (N // 2 + 1)", "window: window name round_pow_of_two: if true, choose round(#power_of_two) as the", "from init_kernel(...) 
output (str), output format: polar: return (magnitude, phase)", "**kwargs): super(STFT, self).__init__(*args, inverse=False, **kwargs) def forward( self, wav: th.Tensor,", "Accept phase & magnitude and output raw waveform Args transform", "T = feats.shape[-2] T = T - T % subsampling_factor", "norm = norm[..., pad:-pad] s = s / (norm +", "normalized={normalized}, \" + f\"center={self.center}, mode={self.mode}, \" + f\"kernel_size={self.num_bins}x{self.K.shape[2]}\") def num_frames(self,", "# 1 x W x T win = th.repeat_interleave(window[None, ...,", "num_mels: int = 80, fmin: float = 0.0, fmax: Optional[float]", "fmax: highest frequency (in Hz) norm: normalize the mel filter", "x T x F, original feature lctx: left context rctx:", "...] ctx = [] T = feats.shape[-2] T = T", "\"real\": real, imag = transform[..., 0], transform[..., 1] elif input", "kernel: th.Tensor, output: str = \"polar\", pre_emphasis: float = 0,", "rctx: int = 1, subsampling_factor: int = 1, op: str", "kernel: th.Tensor, window: th.Tensor, input: str = \"polar\", frame_hop: int", "- kernel_size) // self.frame_hop + 1 def extra_repr(self) -> str:", "False, center: bool = False, mode=\"librosa\") -> None: super(STFTBase, self).__init__()", "== 2: real = th.unsqueeze(real, 0) imag = th.unsqueeze(imag, 0)", "signal, N x (C) x S Return transform (Tensor or", "T, NC x 2B x T packed = th.matmul(kernel[:, 0][None,", "2B x T packed = th.matmul(kernel[:, 0][None, ...], frames) else:", "Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import math import numpy as np import", "= True, normalized: bool = False, inverse: bool = False,", "def mel_filter(frame_len: int, round_pow_of_two: bool = True, num_bins: Optional[int] =", "transform kernels, from init_kernel(...) 
output (str), output format: polar: return", "x src_sr x K times = (np.arange(dst_sr)[:, None, None] /", "FFT size num_bins: number of the frequency bins produced by", "pad:-pad] s = s / (norm + EPSILON) # N", "False, pre_emphasis: float = 0, onesided: bool = True, inverse:", "real = th.cat([real, real[:, reverse]], 1) imag = th.cat([imag, -imag[:,", "(Tensor or [Tensor, Tensor]), N x (C) x F x", "padding=0) # normalized audio samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1", "= 0, onesided: bool = True, inverse: bool = False,", "slight difference on applying window function onesided: output onesided STFT", "coefficient Args: wnd: window name frame_len: length of the frame", "\"hamm\", \"blackman\", \"rect\", \"sqrthann\"]: raise RuntimeError(f\"Unknown window type: {wnd}\") wnd_tpl", "T x F, original feature lctx: left context rctx: right", "name frame_len: length of the frame \"\"\" def sqrthann(frame_len, periodic=True):", "\"hamm\": th.hamming_window, \"blackman\": th.blackman_window, \"bartlett\": th.bartlett_window, \"rect\": th.ones } if", "wnd_tpl[wnd](frame_len) return c def init_kernel(frame_len: int, frame_hop: int, window: str,", "forward( self, wav: th.Tensor, output: str = \"polar\" ) ->", "if center: pad = kernel.shape[-1] // 2 s = s[...,", "of the filter \"\"\" if src_sr == dst_sr: raise ValueError(", "int, frame_hop: int, window: str, round_pow_of_two: bool = True, normalized:", "class STFTBase(nn.Module): \"\"\" Base layer for (i)STFT Args: frame_len: length", "\"polar\" ) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" Accept (single or", "W x T frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop,", "ValueError(f\"Unknown op for feature splicing: {op}\") # [N x ...", "K = I K = K / B # 2", "reshape NC x 1 x S N, S = wav.shape[0],", "3: packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1]) # N x", "License: Apache 2.0 
(http://www.apache.org/licenses/LICENSE-2.0) import math import numpy as np", "Tensor]), STFT output Return s (Tensor), N x S \"\"\"", "splicing: {op}\") # [N x ... x T x F,", "# fmin & fmax freq_upper = sr // 2 if", "padding), 0.0) * (0.5 + 0.5 * np.cos(times / padding", "\"\"\" STFT inner function Args: wav (Tensor), N x (C)", "Return: transform (Tensor or [Tensor, Tensor]), STFT transform results \"\"\"", "NC x 1 x S+2P if center: pad = kernel.shape[-1]", "= transform.device else: device = transform[0].device K, w = init_kernel(frame_len,", "def __init__(self, *args, **kwargs): super(STFT, self).__init__(*args, inverse=False, **kwargs) def forward(", "= tf.pad(wav, (pad, pad), mode=\"reflect\") # STFT if pre_emphasis >", "frame hop size in number samples pre_emphasis: factor of preemphasis", "s = s / (norm + EPSILON) # N x", "2 x B x W K = th.transpose(K, 0, 2)", "= mode self.num_bins = self.K.shape[0] // 4 + 1 self.expr", "th.Tensor, input: str = \"polar\", frame_hop: int = 256, onesided:", "init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=True, mode=mode) return _inverse_stft(transform, K.to(device), w.to(device),", "multiple channel) raw waveform and output magnitude and phase Args", "kernel pre_emphasis: factor of preemphasis mode: \"kaldi\"|\"librosa\", slight difference on", "STFTBase(nn.Module): \"\"\" Base layer for (i)STFT Args: frame_len: length of", "norm: normalize the mel filter coefficients \"\"\" # FFT points", "th.Tensor]], kernel: th.Tensor, window: th.Tensor, input: str = \"polar\", frame_hop:", "size between frames output: output type (complex, real, polar) window:", "x W x T win = th.repeat_interleave(window[None, ..., None], packed.shape[-1],", "\"kaldi\"|\"librosa\", slight difference on applying window function \"\"\" if isinstance(transform,", "centered frames Return: transform (Tensor or [Tensor, Tensor]), STFT transform", "x B x 2 K = th.fft(I / S, 1)", "x S+2P if center: pad = 
kernel.shape[-1] // 2 #", "dim=-1) # W x 1 x W I = th.eye(window.shape[0],", "\"\"\" iSTFT function implementation, equals to iSTFT layer Args: transform:", "window: th.Tensor, input: str = \"polar\", frame_hop: int = 256,", "th.stack(ctx, -1) return splice def _forward_stft( wav: th.Tensor, kernel: th.Tensor,", "str, round_pow_of_two: bool = True, normalized: bool = False, inverse:", "the frame round_pow_of_two: if true, choose round(#power_of_two) as the FFT", "input=input, frame_hop=frame_hop, onesided=onesided, center=center) class STFTBase(nn.Module): \"\"\" Base layer for", "applying window function \"\"\" if isinstance(transform, th.Tensor): device = transform.device", "N x (C) x S kernel (Tensor), STFT transform kernels,", "pair complex: return (real, imag) pair real: return [real; imag]", "th.cos(transform[1]) imag = transform[0] * th.sin(transform[1]) else: real, imag =", "= False, center: bool = False, mode=\"librosa\") -> None: super(STFTBase,", "round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=inverse, mode=mode) self.K = nn.Parameter(K, requires_grad=False) self.w =", "wav.shape[-1] wav = wav.view(-1, 1, S) # NC x 1", "to dst_sr: {src_sr}/{dst_sr}\") gcd = math.gcd(src_sr, dst_sr) src_sr = src_sr", "int, frame_hop: int, input: str = \"complex\", window: str =", "// gcd dst_sr = dst_sr // gcd if src_sr ==", "str = \"cat\") -> th.Tensor: \"\"\" Splice feature Args: feats", "fmax is None: fmax = freq_upper else: fmax = min(fmax", "inverse: bool = False, mode: str = \"librosa\") -> th.Tensor:", "x T frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop, padding=0)", "T if wav_dim == 3: packed = packed.view(N, -1, packed.shape[-2],", "function onesided: output onesided STFT inverse: using iDFT kernel (for", "STFT kernels Args: frame_len: length of the frame frame_hop: hop", "+ freq_upper if fmax < 0 else fmax, freq_upper) fmin", "pad = kernel.shape[-1] // 2 s = s[..., pad:-pad] norm", "== 1: raise ValueError(\"do not 
support integer downsample/upsample\") zeros_per_block =", "if true, choose round(#power_of_two) as the FFT size normalized: use", "freq_upper if fmax < 0 else fmax, freq_upper) fmin =", "length of the frame frame_hop: hop size between frames output:", "* 2, 1, K.shape[-1])) return K, window def mel_filter(frame_len: int,", "th.sum(wav_len <= self.frame_len): raise RuntimeError( f\"Audio samples less than frame_len", "N = 2**math.ceil( math.log2(frame_len)) if round_pow_of_two else frame_len else: N", "= [] T = feats.shape[-2] T = T - T", "int = 256, onesided: bool = False, center: bool =", "True, normalized: bool = False, onesided: bool = True, center:", "samples pre_emphasis: factor of preemphasis onesided: return half FFT bins", "x S N, S = wav.shape[0], wav.shape[-1] wav = wav.view(-1,", "normalized: use normalized DFT kernel pre_emphasis: factor of preemphasis mode:", "transform results \"\"\" wav_dim = wav.dim() if output not in", "nn.Parameter(K, requires_grad=False) self.w = nn.Parameter(w, requires_grad=False) self.frame_len = frame_len self.frame_hop", "bool = False) -> th.Tensor: \"\"\" iSTFT inner function Args:", "output type (complex, real, polar) window: window name center: center", "center padding window if needed if mode == \"librosa\" and", "- 1) * 2 # fmin & fmax freq_upper =", "bands fmin: lowest frequency (in Hz) fmax: highest frequency (in", "// 4 + 1 real = real[..., :num_bins, :] imag", "pre_emphasis self.center = center self.mode = mode self.num_bins = self.K.shape[0]", "kernel.shape[0] // 4 + 1 real = real[..., :num_bins, :]", "iSTFT layer Args: transform: results of STFT frame_len: length of", "- T % subsampling_factor for c in range(-lctx, rctx +", "STFT transform results kernel (Tensor), STFT transform kernels, from init_kernel(...)", "support integer downsample/upsample\") zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio padding", "if src_sr == 1 or dst_sr == 1: raise ValueError(\"do", "s = tf.conv_transpose1d(packed, kernel, 
stride=frame_hop, padding=0) # normalized audio samples", "mel filter coefficients Args: frame_len: length of the frame round_pow_of_two:", "= I S = B**0.5 else: S = 1 I", "imag_dim == 2: real = th.unsqueeze(real, 0) imag = th.unsqueeze(imag,", "x T, NC x 2B x T packed = th.matmul(kernel[:,", "return K, window def mel_filter(frame_len: int, round_pow_of_two: bool = True,", "x 2B x T if wav_dim == 3: packed =", "round_pow_of_two: bool = True, normalized: bool = False, onesided: bool", "wnd != \"rect\": # match with librosa c = wnd_tpl[wnd](frame_len,", "real, imag = th.chunk(packed, 2, dim=-2) # N x (C)", "def num_frames(self, wav_len: th.Tensor) -> th.Tensor: \"\"\" Compute number of", "import numpy as np import torch as th import torch.nn", "=> N x C x 2B x T if wav_dim", "frames \"\"\" if th.sum(wav_len <= self.frame_len): raise RuntimeError( f\"Audio samples", "in number samples onesided: return half FFT bins center: used", "th.Tensor: \"\"\" Return mel filter coefficients Args: frame_len: length of", "x T x F x D splice = th.stack(ctx, -1)", "round_pow_of_two: if true, choose round(#power_of_two) as the FFT size num_bins:", "1, op: str = \"cat\") -> th.Tensor: \"\"\" Splice feature", "output not in [\"polar\", \"complex\", \"real\"]: raise ValueError(f\"Unknown output format:", "x W x T frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]),", "wnd not in [\"bartlett\", \"hann\", \"hamm\", \"blackman\", \"rect\", \"sqrthann\"]: raise", "2**math.ceil( math.log2(frame_len)) if round_pow_of_two else frame_len else: N = (num_bins", "= \"polar\", pre_emphasis: float = 0, frame_hop: int = 256,", "\"kaldi\"|\"librosa\", slight difference on applying window function \"\"\" K, _", "= imag[..., :num_bins, :] if output == \"complex\": return (real,", "filter \"\"\" if src_sr == dst_sr: raise ValueError( f\"src_sr should", "of STFT frame_len: length of the frame frame_hop: hop size", "feats if op not in [\"cat\", \"stack\"]: raise ValueError(f\"Unknown op", "+= kernel_size 
return (wav_len - kernel_size) // self.frame_hop + 1", "Args: transform: results of STFT frame_len: length of the frame", "K = th.transpose(K, 0, 2) * window # 2B x", "not in [2, 3]: raise RuntimeError(f\"STFT expect 2D/3D tensor, but", "B - frame_len - lpad)) if normalized: # make K^H", "None: fmax = freq_upper else: fmax = min(fmax + freq_upper", "N x S s = s.squeeze(1) return s def forward_stft(", "return c def init_kernel(frame_len: int, frame_hop: int, window: str, round_pow_of_two:", "*args, **kwargs): super(iSTFT, self).__init__(*args, inverse=True, **kwargs) def forward(self, transform: Union[th.Tensor,", "STFT inverse: using iDFT kernel (for iSTFT) mode: \"kaldi\"|\"librosa\", slight", "\"kaldi\": K = K[:frame_len] if inverse and not normalized: #", "T \"\"\" return _forward_stft(wav, self.K, output=output, frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis, onesided=self.onesided, center=self.center)", "or [Tensor, Tensor]), STFT transform results \"\"\" wav_dim = wav.dim()", "phase Args wav (Tensor) input signal, N x (C) x", "x 1 x S # else: reshape NC x 1", "size in number samples onesided: return half FFT bins center:", "NC x 1 x S N, S = wav.shape[0], wav.shape[-1]", "if self.center: wav_len += kernel_size return (wav_len - kernel_size) //", "DFT kernel onesided: output onesided STFT mode: \"kaldi\"|\"librosa\", slight difference", "inverse: return iDFT matrix mode: framing mode (librosa or kaldi)", "= th.stack(ctx, -1) return splice def _forward_stft( wav: th.Tensor, kernel:", "bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT inner", "as th import torch.nn as nn import torch.nn.functional as tf", "def speed_perturb_filter(src_sr: int, dst_sr: int, cutoff_ratio: float = 0.95, num_zeros:", "if wnd != \"rect\": # match with librosa c =", "np.abs(times / padding), 0.0) * (0.5 + 0.5 * np.cos(times", "self.num_bins = self.K.shape[0] // 4 + 1 self.expr = (", "T = T - T % subsampling_factor for c in", "= \"sqrthann\", 
round_pow_of_two: bool = True, pre_emphasis: float = 0,", "idx = th.clamp(idx, min=0, max=T - 1) ctx.append(th.index_select(feats, -2, idx))", "[Tensor, Tensor]), STFT transform results kernel (Tensor), STFT transform kernels,", "the frame frame_hop: hop size between frames input: input format", "output=output, frame_hop=self.frame_hop, pre_emphasis=self.pre_emphasis, onesided=self.onesided, center=self.center) class iSTFT(STFTBase): \"\"\" Inverse Short-time", "import math import numpy as np import torch as th", "Fourier Transform as a Layer \"\"\" def __init__(self, *args, **kwargs):", "= 1, subsampling_factor: int = 1, op: str = \"cat\")", "size normalized: use normalized DFT kernel onesided: output onesided STFT", "packed.shape[-2], packed.shape[-1]) # N x (C) x B x T", "norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0) if center: pad =", "\"complex\", \"real\"]: raise ValueError(f\"Unknown output format: {input}\") if input ==", "1, S) # NC x 1 x S+2P if center:", "# N x 1 x T s = tf.conv_transpose1d(packed, kernel,", "round_pow_of_two: if true, choose round(#power_of_two) as the FFT size pre_emphasis:", "x 2B x T packed = th.matmul(kernel[:, 0][None, ...], frames)", "dtype=th.int64) idx = th.clamp(idx, min=0, max=T - 1) ctx.append(th.index_select(feats, -2,", "x T packed = th.matmul(kernel[:, 0][None, ...], frames) else: packed", "num_bins is None: N = 2**math.ceil( math.log2(frame_len)) if round_pow_of_two else", "2, ..., 1] reverse = range(kernel.shape[0] // 4 - 1,", "= (B - frame_len) // 2 window = tf.pad(window, (lpad,", "device=win.device)[:, None] # 1 x 1 x T norm =", "= frame_hop self.onesided = onesided self.pre_emphasis = pre_emphasis self.center =", "Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: \"\"\" STFT function implementation, equals to STFT", "*args, **kwargs): super(STFT, self).__init__(*args, inverse=False, **kwargs) def forward( self, wav:", "num_zeros: int = 64) -> th.Tensor: \"\"\" Return speed perturb", "I, 
stride=frame_hop, padding=0) if center: pad = kernel.shape[-1] // 2", "th.cat(ctx, -1) else: # N x ... x T x", "frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False, mode=mode) return _forward_stft(wav, K.to(wav.device), output=output, frame_hop=frame_hop,", "/ padding * math.pi)) weight = np.sinc( times * zeros_per_block)", "= th.repeat_interleave(window[None, ..., None], packed.shape[-1], dim=-1) # W x 1", "normalized DFT kernel onesided: output onesided STFT inverse: using iDFT", "Args: src_sr: sample rate of the source signal dst_sr: sample", "inverse: using iDFT kernel (for iSTFT) \"\"\" def __init__(self, frame_len:", "frame_len: int) -> th.Tensor: \"\"\" Return window coefficient Args: wnd:", "Args: wav (Tensor), N x (C) x S kernel (Tensor),", "= sr // 2 if fmax is None: fmax =", "input signal, N x (C) x S Return transform (Tensor", "2) * window # 2B x 1 x W K", "polar) window: window name center: center flag (similar with that", "= th.stack([th.eye(B), th.zeros(B, B)], dim=-1) # W x B x", "filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample rate of the source", "wav (Tensor) input signal, N x (C) x S Return", "onesided: output onesided STFT inverse: using iDFT kernel (for iSTFT)", "equal to dst_sr: {src_sr}/{dst_sr}\") gcd = math.gcd(src_sr, dst_sr) src_sr =", "x 2 K = th.fft(I / S, 1) if mode", "DFT kernel pre_emphasis: factor of preemphasis mode: \"kaldi\"|\"librosa\", slight difference", "\"rect\": th.ones } if wnd != \"rect\": # match with", "in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT", "# NC x 1 x S+2P if center: pad =", "return [real; imag] Tensor frame_hop: frame hop size in number", "inverse: bool = False, center: bool = False, mode=\"librosa\") ->", "s (Tensor), N x S \"\"\" return _inverse_stft(transform, self.K, self.w,", "if op not in [\"cat\", \"stack\"]: raise ValueError(f\"Unknown op for", "mode == \"kaldi\": K 
= K[:frame_len] if inverse and not", "x ... x T x FD splice = th.cat(ctx, -1)", "f\"center={self.center}, mode={self.mode}, \" + f\"kernel_size={self.num_bins}x{self.K.shape[2]}\") def num_frames(self, wav_len: th.Tensor) ->", "the filter \"\"\" if src_sr == dst_sr: raise ValueError( f\"src_sr", "K = th.reshape(K, (B * 2, 1, K.shape[-1])) return K,", "None], packed.shape[-1], dim=-1) # W x 1 x W I", "complex: return (real, imag) pair real: return [real; imag] Tensor", "frame frame_hop: hop size between frames output: output type (complex,", "= min(src_sr, dst_sr) * cutoff_ratio padding = 1 + int(num_zeros", "wnd_tpl = { \"sqrthann\": sqrthann, \"hann\": th.hann_window, \"hamm\": th.hamming_window, \"blackman\":", "frames window: window name center: center flag (similar with that", "S # else: reshape NC x 1 x S N,", "dst_sr // gcd if src_sr == 1 or dst_sr ==", "0, -1) # extend matrix: N x B x T", "inverse=False, **kwargs) def forward( self, wav: th.Tensor, output: str =", "0 else fmax, freq_upper) fmin = max(0, fmin) # mel", "inverse=inverse, mode=mode) self.K = nn.Parameter(K, requires_grad=False) self.w = nn.Parameter(w, requires_grad=False)", "transform (Tensor or [Tensor, Tensor]), STFT output Return s (Tensor),", "transform (Tensor or [Tensor, Tensor]), STFT transform results \"\"\" wav_dim", "Args: wnd: window name frame_len: length of the frame \"\"\"", "= False, center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:", "x 2B x T packed = th.cat([real, imag], dim=1) #", "= tf.conv1d(wav, kernel, stride=frame_hop, padding=0) # NC x 2B x", "ValueError( f\"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}\") gcd", "if true, we assumed to have centered frames Return: transform", "= s.squeeze(1) return s def forward_stft( wav: th.Tensor, frame_len: int,", "T s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0) # normalized audio", "frame_len: int, frame_hop: int, output: str = \"complex\", window: str", "self.pre_emphasis = 
pre_emphasis self.center = center self.mode = mode self.num_bins", "[real; imag] Tensor frame_hop: frame hop size in number samples", "STFT transform kernels, from init_kernel(...) input (str), input format: polar:", "stride=frame_hop, padding=0) # normalized audio samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 #", "frame_hop: int, window: str, round_pow_of_two: bool = True, normalized: bool", "num_frames(self, wav_len: th.Tensor) -> th.Tensor: \"\"\" Compute number of the", "frame_len # center padding window if needed if mode ==", "int, dst_sr: int, cutoff_ratio: float = 0.95, num_zeros: int =", "mode == \"librosa\" and B != frame_len: lpad = (B", "1, rctx: int = 1, subsampling_factor: int = 1, op:", "frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False, mode=mode) return _forward_stft(wav, K.to(wav.device),", "B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len # center padding", "on feature context Return: splice (Tensor): feature with context padded" ]
[ "all written for: NHWC class TensorflowResNet(ResNet): def __init__(self, *args, **kwargs):", "b_init = tf.zeros_initializer() biases = self._get_variable('biases', shape=bshape, init=b_init) x =", "x, ksize, stride, filters_out, bias=True): filters_in = x.get_shape()[-1] wshape =", "filters_in = x.get_shape()[-1] wshape = [ksize, ksize, filters_in, filters_out] w_init", "self.dtype = tf.float16 super(TensorflowResNet, self).__init__(*args, **kwargs) def _get_variable(self, name, shape,", "2)): x = tf.reduce_mean(x, reduction_indices=indices) return x def maxpool(self, x):", "reduction_axes=[-3, -2]) return x def fc(self, x, num_units_out): num_units_in =", "paddings=[[0, 0], [0, 0], [0, 0], [0, pad]]) else: shortcut", "shortcut = self.conv(shortcut, 1, stride, out_filters) shortcut = self.norm(shortcut) x", "wshape = [ksize, ksize, filters_in, filters_out] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) weights", "return x def fc(self, x, num_units_out): num_units_in = x.get_shape()[1] w_init", "= self._get_variable('biases', shape=[num_units_out], init=b_init) x = tf.nn.xw_plus_b(x, weights, biases) return", "!= 0 or type == 'C': if type == 'A':", "'GROUP': x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True, training=training, trainable=training, channels_axis=-1,", "= x.get_shape()[1] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) b_init = tf.constant_initializer(0.0) with self.namescope('fc'):", "= tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME') if bias:", "= tf.nn.xw_plus_b(x, weights, biases) return x def reduce_mean(self, x, indices=(1,", "return x def reduce_mean(self, x, indices=(1, 2)): x = tf.reduce_mean(x,", "<reponame>xihuaiwen/chinese_bert # Copyright 2019 Graphcore Ltd. 
from models.resnet_base import ResNet", "x = tf.reduce_mean(x, reduction_indices=indices) return x def maxpool(self, x): x", "3, 1], strides=[1, 2, 2, 1], padding='SAME') return x def", "shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0,", "= self._get_variable('weights', shape=wshape, init=w_init) x = tf.nn.conv2d(x, weights, [1, stride,", "1], padding='SAME') if bias: bshape = [filters_out] b_init = tf.zeros_initializer()", "x def norm(self, x, type='BATCH', groups=32, training=False): if type ==", "bias=True): filters_in = x.get_shape()[-1] wshape = [ksize, ksize, filters_in, filters_out]", "biases = self._get_variable('biases', shape=bshape, init=b_init) x = x + biases", "= shortcut.get_shape() pad = int(x.get_shape()[3] - in_shape[3]) if pad !=", "w_init = contrib.layers.xavier_initializer(dtype=self.dtype) b_init = tf.constant_initializer(0.0) with self.namescope('fc'): weights =", "0], in_shape, strides=[1, stride, stride, 1]) shortcut = tf.pad(shortcut, paddings=[[0,", "tf.layers.batch_normalization(x, fused=True, center=True, scale=True, training=training, trainable=training, momentum=0.997, epsilon=1e-5) elif type", "shape, init): return tf.get_variable(name, shape, initializer=init, dtype=self.dtype) def residual(self, x,", "x.get_shape()[-1] wshape = [ksize, ksize, filters_in, filters_out] w_init = contrib.layers.xavier_initializer(dtype=self.dtype)", "init=w_init) x = tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')", "stride, 1], padding='SAME') if bias: bshape = [filters_out] b_init =", "class TensorflowResNet(ResNet): def __init__(self, *args, **kwargs): self.dtype = tf.float16 super(TensorflowResNet,", "use tf.nn.fused_batch_norm instead. 
x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True, training=training,", "type == 'C': if type == 'A': shortcut = tf.strided_slice(shortcut,", "biases return x def norm(self, x, type='BATCH', groups=32, training=False): if", "ResNet import tensorflow.compat.v1 as tf import tensorflow.contrib as contrib from", "[0, 0], [0, 0], [0, pad]]) else: shortcut = self.conv(shortcut,", "tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME') if bias: bshape", "return x def norm(self, x, type='BATCH', groups=32, training=False): if type", "== 'GROUP': x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True, training=training, trainable=training,", "x = shortcut + x x = self.relu(x) return x", "tf.float16 super(TensorflowResNet, self).__init__(*args, **kwargs) def _get_variable(self, name, shape, init): return", "shortcut, out_filters, stride, type='B'): in_shape = shortcut.get_shape() pad = int(x.get_shape()[3]", "stride, stride, 1], padding='SAME') if bias: bshape = [filters_out] b_init", "x, num_units_out): num_units_in = x.get_shape()[1] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) b_init =", "super(TensorflowResNet, self).__init__(*args, **kwargs) def _get_variable(self, name, shape, init): return tf.get_variable(name,", "'A': shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape, strides=[1,", "Ltd. 
from models.resnet_base import ResNet import tensorflow.compat.v1 as tf import", "import ResNet import tensorflow.compat.v1 as tf import tensorflow.contrib as contrib", "tf import tensorflow.contrib as contrib from tensorflow.python.ipu import normalization_ops #", "[0, pad]]) else: shortcut = self.conv(shortcut, 1, stride, out_filters) shortcut", "out_filters, stride, type='B'): in_shape = shortcut.get_shape() pad = int(x.get_shape()[3] -", "x + biases return x def norm(self, x, type='BATCH', groups=32,", "num_units_out], init=w_init) biases = self._get_variable('biases', shape=[num_units_out], init=b_init) x = tf.nn.xw_plus_b(x,", "tensorflow.compat.v1 as tf import tensorflow.contrib as contrib from tensorflow.python.ipu import", "'C': if type == 'A': shortcut = tf.strided_slice(shortcut, [0, 0,", "if type == 'BATCH': # Perhaps use tf.nn.fused_batch_norm instead. x", "= self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init) biases = self._get_variable('biases', shape=[num_units_out], init=b_init)", "Copyright 2019 Graphcore Ltd. 
from models.resnet_base import ResNet import tensorflow.compat.v1", "normalization_ops.group_norm(x, groups=groups, center=True, scale=True, training=training, trainable=training, channels_axis=-1, reduction_axes=[-3, -2]) return", "in_shape[3]) if pad != 0 or type == 'C': if", "def relu(self, x): return tf.nn.relu(x) def conv(self, x, ksize, stride,", "0, 0, 0], in_shape, strides=[1, stride, stride, 1]) shortcut =", "x def relu(self, x): return tf.nn.relu(x) def conv(self, x, ksize,", "x, shortcut, out_filters, stride, type='B'): in_shape = shortcut.get_shape() pad =", "[filters_out] b_init = tf.zeros_initializer() biases = self._get_variable('biases', shape=bshape, init=b_init) x", "stride, 1]) shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0,", "= [ksize, ksize, filters_in, filters_out] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) weights =", "if type == 'A': shortcut = tf.strided_slice(shortcut, [0, 0, 0,", "strides=[1, stride, stride, 1]) shortcut = tf.pad(shortcut, paddings=[[0, 0], [0,", "0], [0, pad]]) else: shortcut = self.conv(shortcut, 1, stride, out_filters)", "-2]) return x def fc(self, x, num_units_out): num_units_in = x.get_shape()[1]", "Graphcore Ltd. 
from models.resnet_base import ResNet import tensorflow.compat.v1 as tf", "_get_variable(self, name, shape, init): return tf.get_variable(name, shape, initializer=init, dtype=self.dtype) def", "contrib.layers.xavier_initializer(dtype=self.dtype) b_init = tf.constant_initializer(0.0) with self.namescope('fc'): weights = self._get_variable('weights', shape=[num_units_in,", "__init__(self, *args, **kwargs): self.dtype = tf.float16 super(TensorflowResNet, self).__init__(*args, **kwargs) def", "x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')", "stride, stride, 1]) shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0],", "stride, type='B'): in_shape = shortcut.get_shape() pad = int(x.get_shape()[3] - in_shape[3])", "tf.zeros_initializer() biases = self._get_variable('biases', shape=bshape, init=b_init) x = x +", "normalization_ops # This is all written for: NHWC class TensorflowResNet(ResNet):", "x, indices=(1, 2)): x = tf.reduce_mean(x, reduction_indices=indices) return x def", "type == 'BATCH': # Perhaps use tf.nn.fused_batch_norm instead. 
x =", "ksize, filters_in, filters_out] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) weights = self._get_variable('weights', shape=wshape,", "epsilon=1e-5) elif type == 'GROUP': x = normalization_ops.group_norm(x, groups=groups, center=True,", "= contrib.layers.xavier_initializer(dtype=self.dtype) b_init = tf.constant_initializer(0.0) with self.namescope('fc'): weights = self._get_variable('weights',", "= tf.nn.max_pool( x, ksize=[1, 3, 3, 1], strides=[1, 2, 2,", "= tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape, strides=[1, stride, stride,", "self.conv(shortcut, 1, stride, out_filters) shortcut = self.norm(shortcut) x = shortcut", "relu(self, x): return tf.nn.relu(x) def conv(self, x, ksize, stride, filters_out,", "self.relu(x) return x def relu(self, x): return tf.nn.relu(x) def conv(self,", "pad = int(x.get_shape()[3] - in_shape[3]) if pad != 0 or", "def conv(self, x, ksize, stride, filters_out, bias=True): filters_in = x.get_shape()[-1]", "= int(x.get_shape()[3] - in_shape[3]) if pad != 0 or type", "def maxpool(self, x): x = tf.nn.max_pool( x, ksize=[1, 3, 3,", "return tf.nn.relu(x) def conv(self, x, ksize, stride, filters_out, bias=True): filters_in", "if bias: bshape = [filters_out] b_init = tf.zeros_initializer() biases =", "x def fc(self, x, num_units_out): num_units_in = x.get_shape()[1] w_init =", "x): return tf.nn.relu(x) def conv(self, x, ksize, stride, filters_out, bias=True):", "= normalization_ops.group_norm(x, groups=groups, center=True, scale=True, training=training, trainable=training, channels_axis=-1, reduction_axes=[-3, -2])", "= shortcut + x x = self.relu(x) return x def", "# Perhaps use tf.nn.fused_batch_norm instead. x = tf.layers.batch_normalization(x, fused=True, center=True,", "0], [0, 0], [0, pad]]) else: shortcut = self.conv(shortcut, 1,", "shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape, strides=[1, stride,", "# Copyright 2019 Graphcore Ltd. 
from models.resnet_base import ResNet import", "in_shape, strides=[1, stride, stride, 1]) shortcut = tf.pad(shortcut, paddings=[[0, 0],", "self.namescope('fc'): weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init) biases = self._get_variable('biases',", "x x = self.relu(x) return x def relu(self, x): return", "out_filters) shortcut = self.norm(shortcut) x = shortcut + x x", "w_init = contrib.layers.xavier_initializer(dtype=self.dtype) weights = self._get_variable('weights', shape=wshape, init=w_init) x =", "groups=32, training=False): if type == 'BATCH': # Perhaps use tf.nn.fused_batch_norm", "fc(self, x, num_units_out): num_units_in = x.get_shape()[1] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) b_init", "- in_shape[3]) if pad != 0 or type == 'C':", "import tensorflow.compat.v1 as tf import tensorflow.contrib as contrib from tensorflow.python.ipu", "training=training, trainable=training, channels_axis=-1, reduction_axes=[-3, -2]) return x def fc(self, x,", "return x def relu(self, x): return tf.nn.relu(x) def conv(self, x,", "weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init) biases = self._get_variable('biases', shape=[num_units_out],", "def reduce_mean(self, x, indices=(1, 2)): x = tf.reduce_mean(x, reduction_indices=indices) return", "bias: bshape = [filters_out] b_init = tf.zeros_initializer() biases = self._get_variable('biases',", "x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True, training=training, trainable=training, momentum=0.997, epsilon=1e-5)", "momentum=0.997, epsilon=1e-5) elif type == 'GROUP': x = normalization_ops.group_norm(x, groups=groups,", "x, type='BATCH', groups=32, training=False): if type == 'BATCH': # Perhaps", "shape=wshape, init=w_init) x = tf.nn.conv2d(x, weights, [1, stride, stride, 1],", "+ biases return x def norm(self, x, type='BATCH', groups=32, training=False):", "x = tf.nn.conv2d(x, weights, [1, stride, stride, 1], 
padding='SAME') if", "shortcut + x x = self.relu(x) return x def relu(self,", "2, 1], padding='SAME') return x def namescope(self, debug_string): return tf.variable_scope(debug_string)", "type='BATCH', groups=32, training=False): if type == 'BATCH': # Perhaps use", "# This is all written for: NHWC class TensorflowResNet(ResNet): def", "pad != 0 or type == 'C': if type ==", "[ksize, ksize, filters_in, filters_out] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) weights = self._get_variable('weights',", "dtype=self.dtype) def residual(self, x, shortcut, out_filters, stride, type='B'): in_shape =", "contrib.layers.xavier_initializer(dtype=self.dtype) weights = self._get_variable('weights', shape=wshape, init=w_init) x = tf.nn.conv2d(x, weights,", "= tf.reduce_mean(x, reduction_indices=indices) return x def maxpool(self, x): x =", "return tf.get_variable(name, shape, initializer=init, dtype=self.dtype) def residual(self, x, shortcut, out_filters,", "fused=True, center=True, scale=True, training=training, trainable=training, momentum=0.997, epsilon=1e-5) elif type ==", "tf.nn.max_pool( x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],", "type == 'A': shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0],", "init=b_init) x = tf.nn.xw_plus_b(x, weights, biases) return x def reduce_mean(self,", "residual(self, x, shortcut, out_filters, stride, type='B'): in_shape = shortcut.get_shape() pad", "b_init = tf.constant_initializer(0.0) with self.namescope('fc'): weights = self._get_variable('weights', shape=[num_units_in, num_units_out],", "ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME') return", "tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]]) else:", "strides=[1, 2, 2, 1], padding='SAME') return x def namescope(self, debug_string):", "models.resnet_base import ResNet import tensorflow.compat.v1 as tf import tensorflow.contrib as", "tf.constant_initializer(0.0) with self.namescope('fc'): weights = self._get_variable('weights', shape=[num_units_in, 
num_units_out], init=w_init) biases", "biases = self._get_variable('biases', shape=[num_units_out], init=b_init) x = tf.nn.xw_plus_b(x, weights, biases)", "= x.get_shape()[-1] wshape = [ksize, ksize, filters_in, filters_out] w_init =", "0 or type == 'C': if type == 'A': shortcut", "training=training, trainable=training, momentum=0.997, epsilon=1e-5) elif type == 'GROUP': x =", "biases) return x def reduce_mean(self, x, indices=(1, 2)): x =", "stride, filters_out, bias=True): filters_in = x.get_shape()[-1] wshape = [ksize, ksize,", "as contrib from tensorflow.python.ipu import normalization_ops # This is all", "def residual(self, x, shortcut, out_filters, stride, type='B'): in_shape = shortcut.get_shape()", "weights, [1, stride, stride, 1], padding='SAME') if bias: bshape =", "shape=bshape, init=b_init) x = x + biases return x def", "shortcut.get_shape() pad = int(x.get_shape()[3] - in_shape[3]) if pad != 0", "filters_out] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) weights = self._get_variable('weights', shape=wshape, init=w_init) x", "return x def maxpool(self, x): x = tf.nn.max_pool( x, ksize=[1,", "x.get_shape()[1] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) b_init = tf.constant_initializer(0.0) with self.namescope('fc'): weights", "or type == 'C': if type == 'A': shortcut =", "as tf import tensorflow.contrib as contrib from tensorflow.python.ipu import normalization_ops", "is all written for: NHWC class TensorflowResNet(ResNet): def __init__(self, *args,", "[1, stride, stride, 1], padding='SAME') if bias: bshape = [filters_out]", "name, shape, init): return tf.get_variable(name, shape, initializer=init, dtype=self.dtype) def residual(self,", "stride, out_filters) shortcut = self.norm(shortcut) x = shortcut + x", "bshape = [filters_out] b_init = tf.zeros_initializer() biases = self._get_variable('biases', shape=bshape,", "scale=True, training=training, trainable=training, channels_axis=-1, reduction_axes=[-3, -2]) return x def 
fc(self,", "x = tf.nn.xw_plus_b(x, weights, biases) return x def reduce_mean(self, x,", "num_units_out): num_units_in = x.get_shape()[1] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) b_init = tf.constant_initializer(0.0)", "1]) shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0],", "This is all written for: NHWC class TensorflowResNet(ResNet): def __init__(self,", "= tf.layers.batch_normalization(x, fused=True, center=True, scale=True, training=training, trainable=training, momentum=0.997, epsilon=1e-5) elif", "instead. x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True, training=training, trainable=training, momentum=0.997,", "x def reduce_mean(self, x, indices=(1, 2)): x = tf.reduce_mean(x, reduction_indices=indices)", "tf.nn.relu(x) def conv(self, x, ksize, stride, filters_out, bias=True): filters_in =", "tensorflow.contrib as contrib from tensorflow.python.ipu import normalization_ops # This is", "shape, initializer=init, dtype=self.dtype) def residual(self, x, shortcut, out_filters, stride, type='B'):", "conv(self, x, ksize, stride, filters_out, bias=True): filters_in = x.get_shape()[-1] wshape", "filters_out, bias=True): filters_in = x.get_shape()[-1] wshape = [ksize, ksize, filters_in,", "def _get_variable(self, name, shape, init): return tf.get_variable(name, shape, initializer=init, dtype=self.dtype)", "TensorflowResNet(ResNet): def __init__(self, *args, **kwargs): self.dtype = tf.float16 super(TensorflowResNet, self).__init__(*args,", "def __init__(self, *args, **kwargs): self.dtype = tf.float16 super(TensorflowResNet, self).__init__(*args, **kwargs)", "self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init) biases = self._get_variable('biases', shape=[num_units_out], init=b_init) x", "type == 'GROUP': x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True, training=training,", "contrib from tensorflow.python.ipu import normalization_ops # This is all written", 
"groups=groups, center=True, scale=True, training=training, trainable=training, channels_axis=-1, reduction_axes=[-3, -2]) return x", "= self.norm(shortcut) x = shortcut + x x = self.relu(x)", "reduction_indices=indices) return x def maxpool(self, x): x = tf.nn.max_pool( x,", "with self.namescope('fc'): weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init) biases =", "filters_in, filters_out] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) weights = self._get_variable('weights', shape=wshape, init=w_init)", "self).__init__(*args, **kwargs) def _get_variable(self, name, shape, init): return tf.get_variable(name, shape,", "init=w_init) biases = self._get_variable('biases', shape=[num_units_out], init=b_init) x = tf.nn.xw_plus_b(x, weights,", "elif type == 'GROUP': x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True,", "1, stride, out_filters) shortcut = self.norm(shortcut) x = shortcut +", "ksize, stride, filters_out, bias=True): filters_in = x.get_shape()[-1] wshape = [ksize,", "tensorflow.python.ipu import normalization_ops # This is all written for: NHWC", "tf.nn.fused_batch_norm instead. 
x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True, training=training, trainable=training,", "center=True, scale=True, training=training, trainable=training, momentum=0.997, epsilon=1e-5) elif type == 'GROUP':", "type='B'): in_shape = shortcut.get_shape() pad = int(x.get_shape()[3] - in_shape[3]) if", "import normalization_ops # This is all written for: NHWC class", "shape=[num_units_in, num_units_out], init=w_init) biases = self._get_variable('biases', shape=[num_units_out], init=b_init) x =", "= tf.zeros_initializer() biases = self._get_variable('biases', shape=bshape, init=b_init) x = x", "== 'C': if type == 'A': shortcut = tf.strided_slice(shortcut, [0,", "def norm(self, x, type='BATCH', groups=32, training=False): if type == 'BATCH':", "tf.reduce_mean(x, reduction_indices=indices) return x def maxpool(self, x): x = tf.nn.max_pool(", "shortcut = self.norm(shortcut) x = shortcut + x x =", "written for: NHWC class TensorflowResNet(ResNet): def __init__(self, *args, **kwargs): self.dtype", "self._get_variable('biases', shape=bshape, init=b_init) x = x + biases return x", "for: NHWC class TensorflowResNet(ResNet): def __init__(self, *args, **kwargs): self.dtype =", "int(x.get_shape()[3] - in_shape[3]) if pad != 0 or type ==", "training=False): if type == 'BATCH': # Perhaps use tf.nn.fused_batch_norm instead.", "x): x = tf.nn.max_pool( x, ksize=[1, 3, 3, 1], strides=[1,", "+ x x = self.relu(x) return x def relu(self, x):", "from models.resnet_base import ResNet import tensorflow.compat.v1 as tf import tensorflow.contrib", "= self.relu(x) return x def relu(self, x): return tf.nn.relu(x) def", "== 'BATCH': # Perhaps use tf.nn.fused_batch_norm instead. 
x = tf.layers.batch_normalization(x,", "init=b_init) x = x + biases return x def norm(self,", "reduce_mean(self, x, indices=(1, 2)): x = tf.reduce_mean(x, reduction_indices=indices) return x", "= [filters_out] b_init = tf.zeros_initializer() biases = self._get_variable('biases', shape=bshape, init=b_init)", "= tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])", "indices=(1, 2)): x = tf.reduce_mean(x, reduction_indices=indices) return x def maxpool(self,", "x = x + biases return x def norm(self, x,", "= self.conv(shortcut, 1, stride, out_filters) shortcut = self.norm(shortcut) x =", "from tensorflow.python.ipu import normalization_ops # This is all written for:", "= x + biases return x def norm(self, x, type='BATCH',", "x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True, training=training, trainable=training, channels_axis=-1, reduction_axes=[-3,", "= tf.constant_initializer(0.0) with self.namescope('fc'): weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init)", "**kwargs) def _get_variable(self, name, shape, init): return tf.get_variable(name, shape, initializer=init,", "norm(self, x, type='BATCH', groups=32, training=False): if type == 'BATCH': #", "3, 3, 1], strides=[1, 2, 2, 1], padding='SAME') return x", "[0, 0], [0, pad]]) else: shortcut = self.conv(shortcut, 1, stride,", "2, 2, 1], padding='SAME') return x def namescope(self, debug_string): return", "trainable=training, momentum=0.997, epsilon=1e-5) elif type == 'GROUP': x = normalization_ops.group_norm(x,", "num_units_in = x.get_shape()[1] w_init = contrib.layers.xavier_initializer(dtype=self.dtype) b_init = tf.constant_initializer(0.0) with", "NHWC class TensorflowResNet(ResNet): def __init__(self, *args, **kwargs): self.dtype = tf.float16", "initializer=init, dtype=self.dtype) def residual(self, x, shortcut, out_filters, stride, type='B'): in_shape", "tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape, strides=[1, stride, stride, 1])", 
"1], strides=[1, 2, 2, 1], padding='SAME') return x def namescope(self,", "**kwargs): self.dtype = tf.float16 super(TensorflowResNet, self).__init__(*args, **kwargs) def _get_variable(self, name,", "pad]]) else: shortcut = self.conv(shortcut, 1, stride, out_filters) shortcut =", "scale=True, training=training, trainable=training, momentum=0.997, epsilon=1e-5) elif type == 'GROUP': x", "self._get_variable('biases', shape=[num_units_out], init=b_init) x = tf.nn.xw_plus_b(x, weights, biases) return x", "x = tf.nn.max_pool( x, ksize=[1, 3, 3, 1], strides=[1, 2,", "0, 0], in_shape, strides=[1, stride, stride, 1]) shortcut = tf.pad(shortcut,", "self.norm(shortcut) x = shortcut + x x = self.relu(x) return", "tf.nn.xw_plus_b(x, weights, biases) return x def reduce_mean(self, x, indices=(1, 2)):", "in_shape = shortcut.get_shape() pad = int(x.get_shape()[3] - in_shape[3]) if pad", "0], [0, 0], [0, 0], [0, pad]]) else: shortcut =", "x = self.relu(x) return x def relu(self, x): return tf.nn.relu(x)", "self._get_variable('weights', shape=wshape, init=w_init) x = tf.nn.conv2d(x, weights, [1, stride, stride,", "padding='SAME') if bias: bshape = [filters_out] b_init = tf.zeros_initializer() biases", "= tf.float16 super(TensorflowResNet, self).__init__(*args, **kwargs) def _get_variable(self, name, shape, init):", "= self._get_variable('biases', shape=bshape, init=b_init) x = x + biases return", "= contrib.layers.xavier_initializer(dtype=self.dtype) weights = self._get_variable('weights', shape=wshape, init=w_init) x = tf.nn.conv2d(x,", "Perhaps use tf.nn.fused_batch_norm instead. 
x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True,", "*args, **kwargs): self.dtype = tf.float16 super(TensorflowResNet, self).__init__(*args, **kwargs) def _get_variable(self,", "if pad != 0 or type == 'C': if type", "channels_axis=-1, reduction_axes=[-3, -2]) return x def fc(self, x, num_units_out): num_units_in", "init): return tf.get_variable(name, shape, initializer=init, dtype=self.dtype) def residual(self, x, shortcut,", "[0, 0, 0, 0], in_shape, strides=[1, stride, stride, 1]) shortcut", "shape=[num_units_out], init=b_init) x = tf.nn.xw_plus_b(x, weights, biases) return x def", "def fc(self, x, num_units_out): num_units_in = x.get_shape()[1] w_init = contrib.layers.xavier_initializer(dtype=self.dtype)", "x def maxpool(self, x): x = tf.nn.max_pool( x, ksize=[1, 3,", "maxpool(self, x): x = tf.nn.max_pool( x, ksize=[1, 3, 3, 1],", "center=True, scale=True, training=training, trainable=training, channels_axis=-1, reduction_axes=[-3, -2]) return x def", "import tensorflow.contrib as contrib from tensorflow.python.ipu import normalization_ops # This", "else: shortcut = self.conv(shortcut, 1, stride, out_filters) shortcut = self.norm(shortcut)", "tf.get_variable(name, shape, initializer=init, dtype=self.dtype) def residual(self, x, shortcut, out_filters, stride,", "2019 Graphcore Ltd. from models.resnet_base import ResNet import tensorflow.compat.v1 as", "weights = self._get_variable('weights', shape=wshape, init=w_init) x = tf.nn.conv2d(x, weights, [1,", "'BATCH': # Perhaps use tf.nn.fused_batch_norm instead. x = tf.layers.batch_normalization(x, fused=True,", "trainable=training, channels_axis=-1, reduction_axes=[-3, -2]) return x def fc(self, x, num_units_out):", "weights, biases) return x def reduce_mean(self, x, indices=(1, 2)): x", "== 'A': shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape," ]
[ "name='profileBankAccountNr', field=models.CharField(blank=True, max_length=30, null=True), ), migrations.AlterField( model_name='profile', name='profileTelephoneNumber', field=models.CharField(blank=True, max_length=15,", "18:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "] operations = [ migrations.AlterField( model_name='profile', name='profileBankAccountNr', field=models.CharField(blank=True, max_length=30, null=True),", "# Generated by Django 3.1.4 on 2020-12-05 18:46 from django.db", "by Django 3.1.4 on 2020-12-05 18:46 from django.db import migrations,", "models class Migration(migrations.Migration): dependencies = [ ('app', '0020_auto_20201204_2324'), ] operations", "migrations.AlterField( model_name='profile', name='profileBankAccountNr', field=models.CharField(blank=True, max_length=30, null=True), ), migrations.AlterField( model_name='profile', name='profileTelephoneNumber',", "on 2020-12-05 18:46 from django.db import migrations, models class Migration(migrations.Migration):", "('app', '0020_auto_20201204_2324'), ] operations = [ migrations.AlterField( model_name='profile', name='profileBankAccountNr', field=models.CharField(blank=True,", "class Migration(migrations.Migration): dependencies = [ ('app', '0020_auto_20201204_2324'), ] operations =", "Migration(migrations.Migration): dependencies = [ ('app', '0020_auto_20201204_2324'), ] operations = [", "= [ migrations.AlterField( model_name='profile', name='profileBankAccountNr', field=models.CharField(blank=True, max_length=30, null=True), ), migrations.AlterField(", "<filename>backend/app/migrations/0021_auto_20201205_1846.py<gh_stars>0 # Generated by Django 3.1.4 on 2020-12-05 18:46 from", "null=True), ), migrations.AlterField( model_name='profile', name='profileTelephoneNumber', field=models.CharField(blank=True, max_length=15, null=True), ), ]", "'0020_auto_20201204_2324'), ] operations = [ migrations.AlterField( model_name='profile', 
name='profileBankAccountNr', field=models.CharField(blank=True, max_length=30,", "2020-12-05 18:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "model_name='profile', name='profileBankAccountNr', field=models.CharField(blank=True, max_length=30, null=True), ), migrations.AlterField( model_name='profile', name='profileTelephoneNumber', field=models.CharField(blank=True,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "= [ ('app', '0020_auto_20201204_2324'), ] operations = [ migrations.AlterField( model_name='profile',", "[ migrations.AlterField( model_name='profile', name='profileBankAccountNr', field=models.CharField(blank=True, max_length=30, null=True), ), migrations.AlterField( model_name='profile',", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0020_auto_20201204_2324'),", "operations = [ migrations.AlterField( model_name='profile', name='profileBankAccountNr', field=models.CharField(blank=True, max_length=30, null=True), ),", "Django 3.1.4 on 2020-12-05 18:46 from django.db import migrations, models", "3.1.4 on 2020-12-05 18:46 from django.db import migrations, models class", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app',", "migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0020_auto_20201204_2324'), ]", "Generated by Django 3.1.4 on 2020-12-05 18:46 from django.db import", "field=models.CharField(blank=True, max_length=30, null=True), ), migrations.AlterField( model_name='profile', name='profileTelephoneNumber', field=models.CharField(blank=True, max_length=15, null=True),", "max_length=30, null=True), ), migrations.AlterField( model_name='profile', name='profileTelephoneNumber', field=models.CharField(blank=True, max_length=15, null=True), ),", "dependencies = [ ('app', '0020_auto_20201204_2324'), ] operations = [ 
migrations.AlterField(", "[ ('app', '0020_auto_20201204_2324'), ] operations = [ migrations.AlterField( model_name='profile', name='profileBankAccountNr'," ]
[ "= \"gis/masked nodes.txt\" logger = logging.getLogger(__name__) def get_node_ids(shps, masked): merged", "else: outdata = append_output(output_cdf) init_output_vars(outdata, **vars(args)) # Attempts to use", "is the workaround if not args.input_vars: args.input_vars = [(\"DOXG\",InputAttr.BOTTOM)] if", "method. def colon_meta(string): var, attr = string.split(':', 2) return (var,", "enumerate(shps): df = gpd.read_file(shp) df.set_index('node_id', inplace=True) logger.debug(\"Shapefile {0} has {1}", "nodes.shp\" masked_nodes_txt = \"gis/masked nodes.txt\" logger = logging.getLogger(__name__) def get_node_ids(shps,", "= len(c.dimensions['time']) data = copy_data(c, outdata, i, node_ids, **vars(args)) i", "== InputAttr.ALL: siglayers = indata['siglay'][:] if 'siglay' in indata.variables else", "nodes left after masking\".format(len(merged))) return merged.to_numpy() DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274,", "domain nodes.shp\" masked_nodes_txt = \"gis/masked nodes.txt\" logger = logging.getLogger(__name__) def", "the photic zone attr_strings = { \"all\": InputAttr.ALL, \"bottom\": InputAttr.BOTTOM", "= (time.perf_counter() - start_time) to_go = elapsed * (times_ct /", "chunk_times c.close() elapsed = (time.perf_counter() - start_time) to_go = elapsed", "file\") parser.add_argument(\"--invar\", dest=\"input_vars\", type=colon_meta, action=\"append\", help=\"Extract the values of a", "# Iterate over all output variables # If an extraction", "domain_nodes_shp = \"gis/ssm domain nodes.shp\" masked_nodes_txt = \"gis/masked nodes.txt\" logger", "masking\".format(len(merged))) return merged.to_numpy() DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949, -0.30326778,", "len(nodes)) nodeVar = output.createVariable('node', \"i4\", ('node',)) output['node'][:] = nodes timeVar", "https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def chunks(lst, n): \"\"\"Yield successive n-sized chunks from 
lst.\"\"\"", "has {1} nodes\".format(shp, len(df))) if merged is None: merged =", "args = parser.parse_args() # This is the workaround if not", "append_output(output_cdf): return Dataset(output_cdf, 'a') def init_output_vars(output, **kwargs): args = Namespace(**kwargs)", "\"--chunk-size\", type=int, dest=\"chunk_size\", help=\"Process this many CDF files at once\")", "> 1 else Dataset(cdfchunk[0]) chunk_times = len(c.dimensions['time']) data = copy_data(c,", "data = cdfin[var][:, slc, node_ids - 1] logger.debug(\"data is shape", "lists here, see # https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4, verbose=False, masked_nodes_file=os.path.join(script_home, masked_nodes_txt)) args", "file...\") if not os.path.exists(output_cdf): outdata = init_output(output_cdf, indata, node_ids, **vars(args))", "c = MFDataset(cdfchunk) if len(cdfchunk) > 1 else Dataset(cdfchunk[0]) chunk_times", "MFDatasets are # created for only a few netCDF files", "= [] logger.info(\"Caching input files...\") for infile in args.incdf: newpath", "CDF file (created if it doesn't exist)\") parser.add_argument(\"outprefix\", help=\"a prefix", "nodeVar = output.createVariable('node', \"i4\", ('node',)) output['node'][:] = nodes timeVar =", "(created if it doesn't exist)\") parser.add_argument(\"outprefix\", help=\"a prefix for the", "out_name += \"_bottom\" # TODO add \"photic\" case which will", "file\") parser.add_argument(\"outcdf\", help=\"the output CDF file (created if it doesn't", "gpd import numpy as np domain_nodes_shp = \"gis/ssm domain nodes.shp\"", "Dataset(cdfchunk[0]) chunk_times = len(c.dimensions['time']) data = copy_data(c, outdata, i, node_ids,", "append_output(output_cdf) init_output_vars(outdata, **vars(args)) # Attempts to use the entire MFDataset", "InputAttr.ALL else ('time','node') output.createVariable(out_name, 'f4', dims) # Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks", "directory\") # 
Cannot include default values of lists here, see", "out_name = args.outprefix + var if attr == InputAttr.ALL: slc", "= nodes timeVar = output.createVariable('time', \"f4\", ('time',)) # Iterate over", "- 1) total += np.sum([d.size * d.itemsize for k,d in", "return Dataset(output_cdf, 'a') def init_output_vars(output, **kwargs): args = Namespace(**kwargs) for", "= get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file) logger.info(\"Initializing output file...\") if not os.path.exists(output_cdf): outdata", "in chunks(exist_cdfs, args.chunk_size): c = MFDataset(cdfchunk) if len(cdfchunk) > 1", "extraction...\") start_time = time.perf_counter() times_ct = outdata.dimensions['time'].size for cdfchunk in", "ALL = 0 BOTTOM = 1 # TODO add \"photic\"", "i += chunk_times c.close() elapsed = (time.perf_counter() - start_time) to_go", "action=\"store_true\", help=\"Use a read/write cache in a temporary directory\") #", "slice(None) elif attr == InputAttr.BOTTOM: slc = -1 out_name +=", "logging.getLogger(__name__) def get_node_ids(shps, masked): merged = None for i,shp in", "is \"all\": # - add the 'siglay' dimension to the", "= outdata.dimensions['time'].size for cdfchunk in chunks(exist_cdfs, args.chunk_size): c = MFDataset(cdfchunk)", "Expands an input variable argument into a variable name and", "found {0} nodes in {1} shapefiles\".format( len(merged), len(shps))) masked_nodes =", "merged is None: merged = df.index else: merged = merged.union(df.index)", "def main(): script_home = os.path.dirname(os.path.realpath(__file__)) parser = ArgumentParser(description=\"Extract data from", "Namespace(**kwargs) times_ct = len(cdfin.dimensions['time']) alldata = {} # Copy zeta", "{4}KBps)\".format(i, times_ct, int(elapsed), int(to_go), int(total/elapsed/1000))) logger.info(\"Extraction finished.\") outdata.close() def copy_data(cdfin,", "len(lst), n): yield lst[i:i+n] class InputAttr(Enum): ALL = 0 BOTTOM", "dest=\"masked_nodes_file\", type=FileType('r'), help=\"Specify a 
different masked nodes text file\") parser.add_argument(\"--invar\",", "to a blocking approach where MFDatasets are # created for", "geopandas as gpd import numpy as np domain_nodes_shp = \"gis/ssm", "var, attr in args.input_vars: out_name = args.outprefix + var if", "data.items()]) logger.info(\"{0}/{1} ({2}s elapsed, {3}s to go, {4}KBps)\".format(i, times_ct, int(elapsed),", "} # Expands an input variable argument into a variable", "import time import os import tempfile import shutil import logging", "once\") parser.add_argument(\"--cache\", dest=\"cache\", action=\"store_true\", help=\"Use a read/write cache in a", "the output variable # - add a 'zeta' output variable", "if attr == InputAttr.BOTTOM: out_name += \"_bottom\" # TODO handle", "i = 0 total = 0 logger.info(\"Beginning extraction...\") start_time =", "netCDF4 import Dataset, MFDataset import geopandas as gpd import numpy", "os.path.dirname(os.path.realpath(__file__)) parser = ArgumentParser(description=\"Extract data from SSM netcdf output files\")", "zone attr_strings = { \"all\": InputAttr.ALL, \"bottom\": InputAttr.BOTTOM } #", "if attr == InputAttr.ALL else ('time','node') output.createVariable(out_name, 'f4', dims) #", "#logger.setLevel(logging.DEBUG) if args.cache: with tempfile.TemporaryDirectory() as tmpdir: exist_cdfs = []", "= [os.path.join(script_home, domain_nodes_shp)] logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING) #logger.setLevel(logging.DEBUG) if", "siglayers = indata['siglay'][:] if 'siglay' in indata.variables else DEFAULT_SIGLAYERS output.createDimension('siglay',", "TODO add \"photic\" case which will look rather different data", "as gpd import numpy as np domain_nodes_shp = \"gis/ssm domain", "attr == InputAttr.BOTTOM: out_name += \"_bottom\" # TODO handle photic", "= Dataset(output_cdf, \"w\") timeDim = output.createDimension('time', len(indata.dimensions['time'])) nodeDim = output.createDimension('node',", "= data alldata[out_name] = data return 
alldata if __name__ ==", "if it's needed if 'zeta' in cdfout.variables: alldata['zeta'] = cdfin['zeta'][:,", "i,shp in enumerate(shps): df = gpd.read_file(shp) df.set_index('node_id', inplace=True) logger.debug(\"Shapefile {0}", "[os.path.join(script_home, domain_nodes_shp)] logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING) #logger.setLevel(logging.DEBUG) if args.cache:", "action=\"append\", help=\"Specify a domain node shapefile\") parser.add_argument(\"-m\", dest=\"masked_nodes_file\", type=FileType('r'), help=\"Specify", "return output def append_output(output_cdf): return Dataset(output_cdf, 'a') def init_output_vars(output, **kwargs):", "attribute is \"all\": # - add the 'siglay' dimension to", "{1} shapefiles\".format( len(merged), len(shps))) masked_nodes = np.loadtxt(masked) merged = merged.difference(masked_nodes)", "include the 'siglay' dimension on the output variable # -", "\"gis/masked nodes.txt\" logger = logging.getLogger(__name__) def get_node_ids(shps, masked): merged =", "TODO add \"photic\" for the photic zone attr_strings = {", "extraction\") parser.add_argument(\"-c\", \"--chunk-size\", type=int, dest=\"chunk_size\", help=\"Process this many CDF files", "= np.loadtxt(masked) merged = merged.difference(masked_nodes) logger.debug(\"{0} nodes left after masking\".format(len(merged)))", "= gpd.read_file(shp) df.set_index('node_id', inplace=True) logger.debug(\"Shapefile {0} has {1} nodes\".format(shp, len(df)))", "timeVar = output.createVariable('time', \"f4\", ('time',)) # Iterate over all output", "files at once\") parser.add_argument(\"--cache\", dest=\"cache\", action=\"store_true\", help=\"Use a read/write cache", "output file...\") if not os.path.exists(output_cdf): outdata = init_output(output_cdf, indata, node_ids,", "newpath) exist_cdfs.append(newpath) output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf)) if os.path.exists(args.outcdf): logger.info(\"Caching output", "well. 
# Instead, I'm resorting to a blocking approach where", "if os.path.exists(args.outcdf): logger.info(\"Caching output file...\") shutil.copy(args.outcdf, output_cdf) do_extract(exist_cdfs, output_cdf, **vars(args))", "of a different output variable\") parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", help=\"Print", "# TODO handle photic case dims = ('time','siglay','node') if attr", "temporary directory\") # Cannot include default values of lists here,", "output file...\") shutil.copy(args.outcdf, output_cdf) do_extract(exist_cdfs, output_cdf, **vars(args)) # Copy the", "all output variables # If an extraction attribute is \"all\":", "help=\"the output CDF file (created if it doesn't exist)\") parser.add_argument(\"outprefix\",", "for var, attr in args.input_vars: out_name = args.outprefix + var", "times_ct = len(cdfin.dimensions['time']) alldata = {} # Copy zeta if", "len(exist_cdfs) > 1 else Dataset(exist_cdfs[0]) node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file) logger.info(\"Initializing", "masked nodes text file\") parser.add_argument(\"--invar\", dest=\"input_vars\", type=colon_meta, action=\"append\", help=\"Extract the", "{1} nodes\".format(shp, len(df))) if merged is None: merged = df.index", "data alldata[out_name] = data return alldata if __name__ == \"__main__\":", "# https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4, verbose=False, masked_nodes_file=os.path.join(script_home, masked_nodes_txt)) args = parser.parse_args() #", "\"\"\"Yield successive n-sized chunks from lst.\"\"\" for i in range(0,", "('time','node') output.createVariable(out_name, 'f4', dims) # Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def chunks(lst,", "[-0.01581139, -0.06053274, -0.12687974, -0.20864949, -0.30326778, -0.40915567, -0.52520996, -0.65060186, -0.78467834, -0.9269075", "\"f4\", ('time',)) # Iterate over all output variables 
# If", "Iterate over all output variables # If an extraction attribute", "logger.info(\"Extraction finished.\") outdata.close() def copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs): args", "the workaround if not args.input_vars: args.input_vars = [(\"DOXG\",InputAttr.BOTTOM)] if not", "-0.06053274, -0.12687974, -0.20864949, -0.30326778, -0.40915567, -0.52520996, -0.65060186, -0.78467834, -0.9269075 ]", "output['node'][:] = nodes timeVar = output.createVariable('time', \"f4\", ('time',)) # Iterate", "data = copy_data(c, outdata, i, node_ids, **vars(args)) i += chunk_times", "args.input_vars: if attr == InputAttr.ALL: siglayers = indata['siglay'][:] if 'siglay'", "- add a 'zeta' output variable for var, attr in", "BOTTOM = 1 # TODO add \"photic\" for the photic", "during the extraction\") parser.add_argument(\"-c\", \"--chunk-size\", type=int, dest=\"chunk_size\", help=\"Process this many", "newpath = os.path.join(tmpdir, os.path.basename(infile)) shutil.copy(infile, newpath) exist_cdfs.append(newpath) output_cdf = os.path.join(tmpdir,", "outdata.dimensions['time'].size for cdfchunk in chunks(exist_cdfs, args.chunk_size): c = MFDataset(cdfchunk) if", "node_ids, **vars(args)) i += chunk_times c.close() elapsed = (time.perf_counter() -", "int(elapsed), int(to_go), int(total/elapsed/1000))) logger.info(\"Extraction finished.\") outdata.close() def copy_data(cdfin, cdfout, timeidx,", "**vars(args)) # Copy the resulting output CDF back logger.info(\"Saving output", "successive n-sized chunks from lst.\"\"\" for i in range(0, len(lst),", "-0.78467834, -0.9269075 ] def init_output(output_cdf, indata, nodes, **kwargs): args =", "args.outcdf) logger.info(\"Finished.\") else: do_extract(args.incdf, args.outcdf, **vars(args)) def do_extract(exist_cdfs, output_cdf, **kwargs):", "len(cdfchunk) > 1 else Dataset(cdfchunk[0]) chunk_times = len(c.dimensions['time']) data =", "after masking\".format(len(merged))) return merged.to_numpy() DEFAULT_SIGLAYERS = [-0.01581139, 
-0.06053274, -0.12687974, -0.20864949,", "elapsed = (time.perf_counter() - start_time) to_go = elapsed * (times_ct", "InputAttr.BOTTOM } # Expands an input variable argument into a", "= \"gis/ssm domain nodes.shp\" masked_nodes_txt = \"gis/masked nodes.txt\" logger =", "different masked nodes text file\") parser.add_argument(\"--invar\", dest=\"input_vars\", type=colon_meta, action=\"append\", help=\"Extract", "= data else: cdfout[out_name][timeidx:timeidx+times_ct,:] = data alldata[out_name] = data return", "numpy as np domain_nodes_shp = \"gis/ssm domain nodes.shp\" masked_nodes_txt =", "blocking approach where MFDatasets are # created for only a", "siglayers if 'zeta' in indata.variables: output.createVariable('zeta', 'f4', ('time','node')) break return", "from SSM netcdf output files\") parser.add_argument(\"incdf\", nargs=\"+\", help=\"each input CDF", "CDF\") parser.add_argument(\"-d\", dest=\"domain_node_shapefiles\", action=\"append\", help=\"Specify a domain node shapefile\") parser.add_argument(\"-m\",", "values of a different output variable\") parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\",", "for i,shp in enumerate(shps): df = gpd.read_file(shp) df.set_index('node_id', inplace=True) logger.debug(\"Shapefile", "args.masked_nodes_file) logger.info(\"Initializing output file...\") if not os.path.exists(output_cdf): outdata = init_output(output_cdf,", "'a') def init_output_vars(output, **kwargs): args = Namespace(**kwargs) for var, attr", "{} # Copy zeta if it's needed if 'zeta' in", "**kwargs): args = Namespace(**kwargs) logger.info(\"Determining scope of work...\") indata =", "dimension on the output variable # - add a 'zeta'", "\"i4\", ('node',)) output['node'][:] = nodes timeVar = output.createVariable('time', \"f4\", ('time',))", "= { \"all\": InputAttr.ALL, \"bottom\": InputAttr.BOTTOM } # Expands an", "if not args.input_vars: args.input_vars = [(\"DOXG\",InputAttr.BOTTOM)] if not args.domain_node_shapefiles: 
args.domain_node_shapefiles", "def do_extract(exist_cdfs, output_cdf, **kwargs): args = Namespace(**kwargs) logger.info(\"Determining scope of", "elapsed * (times_ct / i - 1) total += np.sum([d.size", "a variable name and an attribute # describing the vertical", "InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data else: cdfout[out_name][timeidx:timeidx+times_ct,:] = data alldata[out_name] =", "else ('time','node') output.createVariable(out_name, 'f4', dims) # Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def", "for the photic zone attr_strings = { \"all\": InputAttr.ALL, \"bottom\":", "attr == InputAttr.BOTTOM: slc = -1 out_name += \"_bottom\" #", "nodes timeVar = output.createVariable('time', \"f4\", ('time',)) # Iterate over all", "len(siglayers)) output.createVariable('siglay', 'f4', ('siglay',)) output['siglay'][:] = siglayers if 'zeta' in", "it doesn't exist)\") parser.add_argument(\"outprefix\", help=\"a prefix for the extracted variables", "logger.debug(\"{0} nodes left after masking\".format(len(merged))) return merged.to_numpy() DEFAULT_SIGLAYERS = [-0.01581139,", "== InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data else: cdfout[out_name][timeidx:timeidx+times_ct,:] = data alldata[out_name]", "not already present # - include the 'siglay' dimension on", "text file\") parser.add_argument(\"--invar\", dest=\"input_vars\", type=colon_meta, action=\"append\", help=\"Extract the values of", "InputAttr.BOTTOM: out_name += \"_bottom\" # TODO handle photic case dims", "'zeta' in indata.variables: output.createVariable('zeta', 'f4', ('time','node')) break return output def", "= [-0.01581139, -0.06053274, -0.12687974, -0.20864949, -0.30326778, -0.40915567, -0.52520996, -0.65060186, -0.78467834,", "parser.add_argument(\"-d\", dest=\"domain_node_shapefiles\", action=\"append\", help=\"Specify a domain node shapefile\") parser.add_argument(\"-m\", 
dest=\"masked_nodes_file\",", "python3 import time import os import tempfile import shutil import", "FileType from netCDF4 import Dataset, MFDataset import geopandas as gpd", "different data = cdfin[var][:, slc, node_ids - 1] logger.debug(\"data is", "help=\"Specify a domain node shapefile\") parser.add_argument(\"-m\", dest=\"masked_nodes_file\", type=FileType('r'), help=\"Specify a", "masked_nodes_txt = \"gis/masked nodes.txt\" logger = logging.getLogger(__name__) def get_node_ids(shps, masked):", "tempfile import shutil import logging from enum import Enum from", "= slice(None) elif attr == InputAttr.BOTTOM: slc = -1 out_name", "len(c.dimensions['time']) data = copy_data(c, outdata, i, node_ids, **vars(args)) i +=", "a domain node shapefile\") parser.add_argument(\"-m\", dest=\"masked_nodes_file\", type=FileType('r'), help=\"Specify a different", "output.createDimension('node', len(nodes)) nodeVar = output.createVariable('node', \"i4\", ('node',)) output['node'][:] = nodes", "parser.add_argument(\"--invar\", dest=\"input_vars\", type=colon_meta, action=\"append\", help=\"Extract the values of a different", "def init_output_vars(output, **kwargs): args = Namespace(**kwargs) for var, attr in", "slc = -1 out_name += \"_bottom\" # TODO add \"photic\"", "are # created for only a few netCDF files at", "= args.outprefix + var if attr == InputAttr.ALL: slc =", "see # https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4, verbose=False, masked_nodes_file=os.path.join(script_home, masked_nodes_txt)) args = parser.parse_args()", "name and an attribute # describing the vertical extraction method.", "a read/write cache in a temporary directory\") # Cannot include", "= cdfin[var][:, slc, node_ids - 1] logger.debug(\"data is shape \"", "-0.40915567, -0.52520996, -0.65060186, -0.78467834, -0.9269075 ] def init_output(output_cdf, indata, nodes,", "the output if it's not already present # - include", "init_output_vars(output, **kwargs): args = Namespace(**kwargs) 
for var, attr in args.input_vars:", "InputAttr.BOTTOM: slc = -1 out_name += \"_bottom\" # TODO add", "+ str(data.shape)) if attr == InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data else:", "= ArgumentParser(description=\"Extract data from SSM netcdf output files\") parser.add_argument(\"incdf\", nargs=\"+\",", "/ i - 1) total += np.sum([d.size * d.itemsize for", "= indata['time'][:] / 3600 / 24 else: outdata = append_output(output_cdf)", "merged.to_numpy() DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949, -0.30326778, -0.40915567, -0.52520996,", "as np domain_nodes_shp = \"gis/ssm domain nodes.shp\" masked_nodes_txt = \"gis/masked", "few netCDF files at a time indata.close() i = 0", "Cannot include default values of lists here, see # https://bugs.python.org/issue16399", "# Copy zeta if it's needed if 'zeta' in cdfout.variables:", "1 else Dataset(exist_cdfs[0]) node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file) logger.info(\"Initializing output file...\")", "timeDim = output.createDimension('time', len(indata.dimensions['time'])) nodeDim = output.createDimension('node', len(nodes)) nodeVar =", "**kwargs): args = Namespace(**kwargs) for var, attr in args.input_vars: out_name", "get_node_ids(shps, masked): merged = None for i,shp in enumerate(shps): df", ":] = alldata['zeta'] for var, attr in args.input_vars: out_name =", "InputAttr.ALL: siglayers = indata['siglay'][:] if 'siglay' in indata.variables else DEFAULT_SIGLAYERS", "chunk_times = len(c.dimensions['time']) data = copy_data(c, outdata, i, node_ids, **vars(args))", "indata.close() i = 0 total = 0 logger.info(\"Beginning extraction...\") start_time", "InputAttr.ALL, \"bottom\": InputAttr.BOTTOM } # Expands an input variable argument", "= init_output(output_cdf, indata, node_ids, **vars(args)) outdata['time'][:] = indata['time'][:] / 3600", "shapefile\") parser.add_argument(\"-m\", dest=\"masked_nodes_file\", type=FileType('r'), help=\"Specify a 
different masked nodes text", "output.createDimension('time', len(indata.dimensions['time'])) nodeDim = output.createDimension('node', len(nodes)) nodeVar = output.createVariable('node', \"i4\",", "len(merged), len(shps))) masked_nodes = np.loadtxt(masked) merged = merged.difference(masked_nodes) logger.debug(\"{0} nodes", "ArgumentParser(description=\"Extract data from SSM netcdf output files\") parser.add_argument(\"incdf\", nargs=\"+\", help=\"each", "os import tempfile import shutil import logging from enum import", "it's not already present # - include the 'siglay' dimension", "= -1 out_name += \"_bottom\" # TODO add \"photic\" case", "photic zone attr_strings = { \"all\": InputAttr.ALL, \"bottom\": InputAttr.BOTTOM }", "extraction method. def colon_meta(string): var, attr = string.split(':', 2) return", "cdfin[var][:, slc, node_ids - 1] logger.debug(\"data is shape \" +", "= 0 logger.info(\"Beginning extraction...\") start_time = time.perf_counter() times_ct = outdata.dimensions['time'].size", "indata = MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0]) node_ids", "rather different data = cdfin[var][:, slc, node_ids - 1] logger.debug(\"data", "is shape \" + str(data.shape)) if attr == InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:]", "os.path.basename(args.outcdf)) if os.path.exists(args.outcdf): logger.info(\"Caching output file...\") shutil.copy(args.outcdf, output_cdf) do_extract(exist_cdfs, output_cdf,", "the extraction\") parser.add_argument(\"-c\", \"--chunk-size\", type=int, dest=\"chunk_size\", help=\"Process this many CDF", "gpd.read_file(shp) df.set_index('node_id', inplace=True) logger.debug(\"Shapefile {0} has {1} nodes\".format(shp, len(df))) if", "var if attr == InputAttr.BOTTOM: out_name += \"_bottom\" # TODO", "if args.verbose else logging.WARNING) #logger.setLevel(logging.DEBUG) if args.cache: with tempfile.TemporaryDirectory() as", "InputAttr(Enum): ALL = 0 BOTTOM = 1 # TODO add", 
"merged.difference(masked_nodes) logger.debug(\"{0} nodes left after masking\".format(len(merged))) return merged.to_numpy() DEFAULT_SIGLAYERS =", "the values of a different output variable\") parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",", "args.outprefix + var if attr == InputAttr.ALL: slc = slice(None)", "help=\"Use a read/write cache in a temporary directory\") # Cannot", "output_cdf, **kwargs): args = Namespace(**kwargs) logger.info(\"Determining scope of work...\") indata", "= os.path.join(tmpdir, os.path.basename(infile)) shutil.copy(infile, newpath) exist_cdfs.append(newpath) output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf))", "created for only a few netCDF files at a time", "dims = ('time','siglay','node') if attr == InputAttr.ALL else ('time','node') output.createVariable(out_name,", "exist_cdfs.append(newpath) output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf)) if os.path.exists(args.outcdf): logger.info(\"Caching output file...\")", "-0.65060186, -0.78467834, -0.9269075 ] def init_output(output_cdf, indata, nodes, **kwargs): args", "help=\"a prefix for the extracted variables in the output CDF\")", "in the output CDF\") parser.add_argument(\"-d\", dest=\"domain_node_shapefiles\", action=\"append\", help=\"Specify a domain", "# Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def chunks(lst, n): \"\"\"Yield successive n-sized", "as tmpdir: exist_cdfs = [] logger.info(\"Caching input files...\") for infile", "if args.cache: with tempfile.TemporaryDirectory() as tmpdir: exist_cdfs = [] logger.info(\"Caching", "use the entire MFDataset don't seem to scale well. 
#", "scope of work...\") indata = MFDataset(exist_cdfs) if len(exist_cdfs) > 1", "= args.outprefix + var if attr == InputAttr.BOTTOM: out_name +=", "output_cdf) do_extract(exist_cdfs, output_cdf, **vars(args)) # Copy the resulting output CDF", "logger.info(\"Finished.\") else: do_extract(args.incdf, args.outcdf, **vars(args)) def do_extract(exist_cdfs, output_cdf, **kwargs): args", "0 BOTTOM = 1 # TODO add \"photic\" for the", "args.incdf: newpath = os.path.join(tmpdir, os.path.basename(infile)) shutil.copy(infile, newpath) exist_cdfs.append(newpath) output_cdf =", "progress messages during the extraction\") parser.add_argument(\"-c\", \"--chunk-size\", type=int, dest=\"chunk_size\", help=\"Process", "in args.input_vars: if attr == InputAttr.ALL: siglayers = indata['siglay'][:] if", "({2}s elapsed, {3}s to go, {4}KBps)\".format(i, times_ct, int(elapsed), int(to_go), int(total/elapsed/1000)))", "+= np.sum([d.size * d.itemsize for k,d in data.items()]) logger.info(\"{0}/{1} ({2}s", "handle photic case dims = ('time','siglay','node') if attr == InputAttr.ALL", "variable argument into a variable name and an attribute #", "logger.debug(\"data is shape \" + str(data.shape)) if attr == InputAttr.ALL:", "not args.input_vars: args.input_vars = [(\"DOXG\",InputAttr.BOTTOM)] if not args.domain_node_shapefiles: args.domain_node_shapefiles =", "Enum from argparse import ArgumentParser, Namespace, FileType from netCDF4 import", "**kwargs): args = Namespace(**kwargs) times_ct = len(cdfin.dimensions['time']) alldata = {}", "var, attr in args.input_vars: if attr == InputAttr.ALL: siglayers =", "out_name = args.outprefix + var if attr == InputAttr.BOTTOM: out_name", "in data.items()]) logger.info(\"{0}/{1} ({2}s elapsed, {3}s to go, {4}KBps)\".format(i, times_ct,", "node_ids, **vars(args)) outdata['time'][:] = indata['time'][:] / 3600 / 24 else:", "# created for only a few netCDF files at a", "nodes, **kwargs): args = Namespace(**kwargs) output = Dataset(output_cdf, \"w\") timeDim", 
"MFDataset don't seem to scale well. # Instead, I'm resorting", "time indata.close() i = 0 total = 0 logger.info(\"Beginning extraction...\")", "args.outprefix + var if attr == InputAttr.BOTTOM: out_name += \"_bottom\"", "output if it's not already present # - include the", "return (var, attr_strings[attr]) def main(): script_home = os.path.dirname(os.path.realpath(__file__)) parser =", "in args.incdf: newpath = os.path.join(tmpdir, os.path.basename(infile)) shutil.copy(infile, newpath) exist_cdfs.append(newpath) output_cdf", "in range(0, len(lst), n): yield lst[i:i+n] class InputAttr(Enum): ALL =", "(time.perf_counter() - start_time) to_go = elapsed * (times_ct / i", "add \"photic\" case which will look rather different data =", "extraction attribute is \"all\": # - add the 'siglay' dimension", "**kwargs): args = Namespace(**kwargs) output = Dataset(output_cdf, \"w\") timeDim =", "indata['siglay'][:] if 'siglay' in indata.variables else DEFAULT_SIGLAYERS output.createDimension('siglay', len(siglayers)) output.createVariable('siglay',", "seem to scale well. # Instead, I'm resorting to a", "output.createVariable('zeta', 'f4', ('time','node')) break return output def append_output(output_cdf): return Dataset(output_cdf,", "enum import Enum from argparse import ArgumentParser, Namespace, FileType from", "and an attribute # describing the vertical extraction method. def", "this many CDF files at once\") parser.add_argument(\"--cache\", dest=\"cache\", action=\"store_true\", help=\"Use", "scale well. 
# Instead, I'm resorting to a blocking approach", "InputAttr.ALL: slc = slice(None) elif attr == InputAttr.BOTTOM: slc =", "import geopandas as gpd import numpy as np domain_nodes_shp =", "= MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0]) node_ids =", "elapsed, {3}s to go, {4}KBps)\".format(i, times_ct, int(elapsed), int(to_go), int(total/elapsed/1000))) logger.info(\"Extraction", "which will look rather different data = cdfin[var][:, slc, node_ids", "(times_ct / i - 1) total += np.sum([d.size * d.itemsize", "not args.domain_node_shapefiles: args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)] logging.basicConfig(level=logging.INFO if args.verbose else", "n): yield lst[i:i+n] class InputAttr(Enum): ALL = 0 BOTTOM =", "action=\"append\", help=\"Extract the values of a different output variable\") parser.add_argument(\"-v\",", "# describing the vertical extraction method. def colon_meta(string): var, attr", "import os import tempfile import shutil import logging from enum", "input CDF file\") parser.add_argument(\"outcdf\", help=\"the output CDF file (created if", "alldata = {} # Copy zeta if it's needed if", "variables # If an extraction attribute is \"all\": # -", "args.chunk_size): c = MFDataset(cdfchunk) if len(cdfchunk) > 1 else Dataset(cdfchunk[0])", "indata, nodes, **kwargs): args = Namespace(**kwargs) output = Dataset(output_cdf, \"w\")", "https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4, verbose=False, masked_nodes_file=os.path.join(script_home, masked_nodes_txt)) args = parser.parse_args() # This", "[(\"DOXG\",InputAttr.BOTTOM)] if not args.domain_node_shapefiles: args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)] logging.basicConfig(level=logging.INFO if", "help=\"Print progress messages during the extraction\") parser.add_argument(\"-c\", \"--chunk-size\", type=int, dest=\"chunk_size\",", "= Namespace(**kwargs) output = Dataset(output_cdf, \"w\") timeDim = 
output.createDimension('time', len(indata.dimensions['time']))", "args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)] logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING) #logger.setLevel(logging.DEBUG)", "Dataset(output_cdf, \"w\") timeDim = output.createDimension('time', len(indata.dimensions['time'])) nodeDim = output.createDimension('node', len(nodes))", "case which will look rather different data = cdfin[var][:, slc,", "k,d in data.items()]) logger.info(\"{0}/{1} ({2}s elapsed, {3}s to go, {4}KBps)\".format(i,", "else: merged = merged.union(df.index) logger.debug(\"get_node_ids found {0} nodes in {1}", "] def init_output(output_cdf, indata, nodes, **kwargs): args = Namespace(**kwargs) output", "help=\"Specify a different masked nodes text file\") parser.add_argument(\"--invar\", dest=\"input_vars\", type=colon_meta,", "# TODO add \"photic\" case which will look rather different", "photic case dims = ('time','siglay','node') if attr == InputAttr.ALL else", "SSM netcdf output files\") parser.add_argument(\"incdf\", nargs=\"+\", help=\"each input CDF file\")", "total = 0 logger.info(\"Beginning extraction...\") start_time = time.perf_counter() times_ct =", "variable for var, attr in args.input_vars: if attr == InputAttr.ALL:", "nodes.txt\" logger = logging.getLogger(__name__) def get_node_ids(shps, masked): merged = None", "yield lst[i:i+n] class InputAttr(Enum): ALL = 0 BOTTOM = 1", "Copy zeta if it's needed if 'zeta' in cdfout.variables: alldata['zeta']", "Namespace(**kwargs) for var, attr in args.input_vars: out_name = args.outprefix +", "to the output if it's not already present # -", "output variable\") parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", help=\"Print progress messages during", "init_output(output_cdf, indata, nodes, **kwargs): args = Namespace(**kwargs) output = Dataset(output_cdf,", "for cdfchunk in chunks(exist_cdfs, args.chunk_size): c = MFDataset(cdfchunk) if 
len(cdfchunk)", "# - add a 'zeta' output variable for var, attr", "lst.\"\"\" for i in range(0, len(lst), n): yield lst[i:i+n] class", "n): \"\"\"Yield successive n-sized chunks from lst.\"\"\" for i in", "len(cdfin.dimensions['time']) alldata = {} # Copy zeta if it's needed", "data from SSM netcdf output files\") parser.add_argument(\"incdf\", nargs=\"+\", help=\"each input", "# Cannot include default values of lists here, see #", "logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING) #logger.setLevel(logging.DEBUG) if args.cache: with tempfile.TemporaryDirectory()", "parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", help=\"Print progress messages during the extraction\")", "script_home = os.path.dirname(os.path.realpath(__file__)) parser = ArgumentParser(description=\"Extract data from SSM netcdf", "argparse import ArgumentParser, Namespace, FileType from netCDF4 import Dataset, MFDataset", "chunks from lst.\"\"\" for i in range(0, len(lst), n): yield", "in indata.variables: output.createVariable('zeta', 'f4', ('time','node')) break return output def append_output(output_cdf):", "output CDF\") parser.add_argument(\"-d\", dest=\"domain_node_shapefiles\", action=\"append\", help=\"Specify a domain node shapefile\")", "domain_nodes_shp)] logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING) #logger.setLevel(logging.DEBUG) if args.cache: with", "not os.path.exists(output_cdf): outdata = init_output(output_cdf, indata, node_ids, **vars(args)) outdata['time'][:] =", "= output.createDimension('node', len(nodes)) nodeVar = output.createVariable('node', \"i4\", ('node',)) output['node'][:] =", "output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf)) if os.path.exists(args.outcdf): logger.info(\"Caching output file...\") shutil.copy(args.outcdf,", "- include the 'siglay' dimension on the output variable #", "if merged is None: merged = df.index else: merged =", "output variable for 
var, attr in args.input_vars: if attr ==", "files...\") for infile in args.incdf: newpath = os.path.join(tmpdir, os.path.basename(infile)) shutil.copy(infile,", "with tempfile.TemporaryDirectory() as tmpdir: exist_cdfs = [] logger.info(\"Caching input files...\")", "variables in the output CDF\") parser.add_argument(\"-d\", dest=\"domain_node_shapefiles\", action=\"append\", help=\"Specify a", "tempfile.TemporaryDirectory() as tmpdir: exist_cdfs = [] logger.info(\"Caching input files...\") for", "approach where MFDatasets are # created for only a few", "= indata['siglay'][:] if 'siglay' in indata.variables else DEFAULT_SIGLAYERS output.createDimension('siglay', len(siglayers))", "= string.split(':', 2) return (var, attr_strings[attr]) def main(): script_home =", "logger.info(\"Initializing output file...\") if not os.path.exists(output_cdf): outdata = init_output(output_cdf, indata,", "if not os.path.exists(output_cdf): outdata = init_output(output_cdf, indata, node_ids, **vars(args)) outdata['time'][:]", "the output CDF\") parser.add_argument(\"-d\", dest=\"domain_node_shapefiles\", action=\"append\", help=\"Specify a domain node", "i in range(0, len(lst), n): yield lst[i:i+n] class InputAttr(Enum): ALL", "= 0 total = 0 logger.info(\"Beginning extraction...\") start_time = time.perf_counter()", "Attempts to use the entire MFDataset don't seem to scale", "slc = slice(None) elif attr == InputAttr.BOTTOM: slc = -1", "('siglay',)) output['siglay'][:] = siglayers if 'zeta' in indata.variables: output.createVariable('zeta', 'f4',", "i, node_ids, **vars(args)) i += chunk_times c.close() elapsed = (time.perf_counter()", "+= \"_bottom\" # TODO add \"photic\" case which will look", "shapefiles\".format( len(merged), len(shps))) masked_nodes = np.loadtxt(masked) merged = merged.difference(masked_nodes) logger.debug(\"{0}", "alldata['zeta'] for var, attr in args.input_vars: out_name = args.outprefix +", "tmpdir: exist_cdfs = [] logger.info(\"Caching input files...\") for 
infile in", "= Namespace(**kwargs) for var, attr in args.input_vars: out_name = args.outprefix", "dimension to the output if it's not already present #", "\" + str(data.shape)) if attr == InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data", "masked_nodes_txt)) args = parser.parse_args() # This is the workaround if", "-0.30326778, -0.40915567, -0.52520996, -0.65060186, -0.78467834, -0.9269075 ] def init_output(output_cdf, indata,", "d.itemsize for k,d in data.items()]) logger.info(\"{0}/{1} ({2}s elapsed, {3}s to", "in enumerate(shps): df = gpd.read_file(shp) df.set_index('node_id', inplace=True) logger.debug(\"Shapefile {0} has", "i - 1) total += np.sum([d.size * d.itemsize for k,d", "output.createDimension('siglay', len(siglayers)) output.createVariable('siglay', 'f4', ('siglay',)) output['siglay'][:] = siglayers if 'zeta'", "of lists here, see # https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4, verbose=False, masked_nodes_file=os.path.join(script_home, masked_nodes_txt))", "logger.info(\"Saving output file...\") shutil.copy(output_cdf, args.outcdf) logger.info(\"Finished.\") else: do_extract(args.incdf, args.outcdf, **vars(args))", "ArgumentParser, Namespace, FileType from netCDF4 import Dataset, MFDataset import geopandas", "CDF file\") parser.add_argument(\"outcdf\", help=\"the output CDF file (created if it", "- 1] logger.debug(\"data is shape \" + str(data.shape)) if attr", "logging from enum import Enum from argparse import ArgumentParser, Namespace,", "needed if 'zeta' in cdfout.variables: alldata['zeta'] = cdfin['zeta'][:, node_ids -", "output.createVariable('node', \"i4\", ('node',)) output['node'][:] = nodes timeVar = output.createVariable('time', \"f4\",", "os.path.basename(infile)) shutil.copy(infile, newpath) exist_cdfs.append(newpath) output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf)) if os.path.exists(args.outcdf):", "in a temporary directory\") # Cannot include default values of", "outdata = 
append_output(output_cdf) init_output_vars(outdata, **vars(args)) # Attempts to use the", "total += np.sum([d.size * d.itemsize for k,d in data.items()]) logger.info(\"{0}/{1}", "file...\") shutil.copy(args.outcdf, output_cdf) do_extract(exist_cdfs, output_cdf, **vars(args)) # Copy the resulting", "else DEFAULT_SIGLAYERS output.createDimension('siglay', len(siglayers)) output.createVariable('siglay', 'f4', ('siglay',)) output['siglay'][:] = siglayers", "input variable argument into a variable name and an attribute", "attr_strings[attr]) def main(): script_home = os.path.dirname(os.path.realpath(__file__)) parser = ArgumentParser(description=\"Extract data", "else Dataset(cdfchunk[0]) chunk_times = len(c.dimensions['time']) data = copy_data(c, outdata, i,", "- start_time) to_go = elapsed * (times_ct / i -", "1] cdfout['zeta'][timeidx:timeidx + times_ct, :] = alldata['zeta'] for var, attr", "= os.path.join(tmpdir, os.path.basename(args.outcdf)) if os.path.exists(args.outcdf): logger.info(\"Caching output file...\") shutil.copy(args.outcdf, output_cdf)", "nodes in {1} shapefiles\".format( len(merged), len(shps))) masked_nodes = np.loadtxt(masked) merged", "an attribute # describing the vertical extraction method. 
def colon_meta(string):", "shape \" + str(data.shape)) if attr == InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:] =", "if it doesn't exist)\") parser.add_argument(\"outprefix\", help=\"a prefix for the extracted", "cache in a temporary directory\") # Cannot include default values", "cdfout, timeidx, node_ids, **kwargs): args = Namespace(**kwargs) times_ct = len(cdfin.dimensions['time'])", "'f4', dims) # Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def chunks(lst, n): \"\"\"Yield", "copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs): args = Namespace(**kwargs) times_ct =", "DEFAULT_SIGLAYERS output.createDimension('siglay', len(siglayers)) output.createVariable('siglay', 'f4', ('siglay',)) output['siglay'][:] = siglayers if", "import ArgumentParser, Namespace, FileType from netCDF4 import Dataset, MFDataset import", "args.input_vars: out_name = args.outprefix + var if attr == InputAttr.ALL:", "args = Namespace(**kwargs) for var, attr in args.input_vars: out_name =", "outdata.close() def copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs): args = Namespace(**kwargs)", "init_output_vars(outdata, **vars(args)) # Attempts to use the entire MFDataset don't", "= {} # Copy zeta if it's needed if 'zeta'", "output def append_output(output_cdf): return Dataset(output_cdf, 'a') def init_output_vars(output, **kwargs): args", "\"_bottom\" # TODO handle photic case dims = ('time','siglay','node') if", "MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0]) node_ids = get_node_ids(args.domain_node_shapefiles,", "masked_nodes_file=os.path.join(script_home, masked_nodes_txt)) args = parser.parse_args() # This is the workaround", "{3}s to go, {4}KBps)\".format(i, times_ct, int(elapsed), int(to_go), int(total/elapsed/1000))) logger.info(\"Extraction finished.\")", "if attr == InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data else: 
cdfout[out_name][timeidx:timeidx+times_ct,:] =", "vertical extraction method. def colon_meta(string): var, attr = string.split(':', 2)", "the 'siglay' dimension to the output if it's not already", "<reponame>bedaro/ssm-analysis<gh_stars>0 #!/usr/bin/env python3 import time import os import tempfile import", "node shapefile\") parser.add_argument(\"-m\", dest=\"masked_nodes_file\", type=FileType('r'), help=\"Specify a different masked nodes", "logging.WARNING) #logger.setLevel(logging.DEBUG) if args.cache: with tempfile.TemporaryDirectory() as tmpdir: exist_cdfs =", "np.sum([d.size * d.itemsize for k,d in data.items()]) logger.info(\"{0}/{1} ({2}s elapsed,", "len(indata.dimensions['time'])) nodeDim = output.createDimension('node', len(nodes)) nodeVar = output.createVariable('node', \"i4\", ('node',))", "len(df))) if merged is None: merged = df.index else: merged", "main(): script_home = os.path.dirname(os.path.realpath(__file__)) parser = ArgumentParser(description=\"Extract data from SSM", "= os.path.dirname(os.path.realpath(__file__)) parser = ArgumentParser(description=\"Extract data from SSM netcdf output", "a different output variable\") parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", help=\"Print progress", "\"w\") timeDim = output.createDimension('time', len(indata.dimensions['time'])) nodeDim = output.createDimension('node', len(nodes)) nodeVar", "{0} nodes in {1} shapefiles\".format( len(merged), len(shps))) masked_nodes = np.loadtxt(masked)", "**vars(args)) # Attempts to use the entire MFDataset don't seem", "shutil.copy(args.outcdf, output_cdf) do_extract(exist_cdfs, output_cdf, **vars(args)) # Copy the resulting output", "24 else: outdata = append_output(output_cdf) init_output_vars(outdata, **vars(args)) # Attempts to", "an input variable argument into a variable name and an", "for the extracted variables in the output CDF\") parser.add_argument(\"-d\", dest=\"domain_node_shapefiles\",", "node_ids = 
get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file) logger.info(\"Initializing output file...\") if not os.path.exists(output_cdf):", "do_extract(exist_cdfs, output_cdf, **kwargs): args = Namespace(**kwargs) logger.info(\"Determining scope of work...\")", "= append_output(output_cdf) init_output_vars(outdata, **vars(args)) # Attempts to use the entire", "start_time) to_go = elapsed * (times_ct / i - 1)", "MFDataset import geopandas as gpd import numpy as np domain_nodes_shp", "merged = df.index else: merged = merged.union(df.index) logger.debug(\"get_node_ids found {0}", "a temporary directory\") # Cannot include default values of lists", "for i in range(0, len(lst), n): yield lst[i:i+n] class InputAttr(Enum):", "from enum import Enum from argparse import ArgumentParser, Namespace, FileType", "df.set_index('node_id', inplace=True) logger.debug(\"Shapefile {0} has {1} nodes\".format(shp, len(df))) if merged", "the vertical extraction method. def colon_meta(string): var, attr = string.split(':',", "node_ids - 1] logger.debug(\"data is shape \" + str(data.shape)) if", "parser.add_argument(\"outprefix\", help=\"a prefix for the extracted variables in the output", "inplace=True) logger.debug(\"Shapefile {0} has {1} nodes\".format(shp, len(df))) if merged is", "in cdfout.variables: alldata['zeta'] = cdfin['zeta'][:, node_ids - 1] cdfout['zeta'][timeidx:timeidx +", "(var, attr_strings[attr]) def main(): script_home = os.path.dirname(os.path.realpath(__file__)) parser = ArgumentParser(description=\"Extract", "df.index else: merged = merged.union(df.index) logger.debug(\"get_node_ids found {0} nodes in", "an extraction attribute is \"all\": # - add the 'siglay'", "0 total = 0 logger.info(\"Beginning extraction...\") start_time = time.perf_counter() times_ct", "else: do_extract(args.incdf, args.outcdf, **vars(args)) def do_extract(exist_cdfs, output_cdf, **kwargs): args =", "def chunks(lst, n): \"\"\"Yield successive n-sized chunks from lst.\"\"\" for", 
"args.verbose else logging.WARNING) #logger.setLevel(logging.DEBUG) if args.cache: with tempfile.TemporaryDirectory() as tmpdir:", "CDF back logger.info(\"Saving output file...\") shutil.copy(output_cdf, args.outcdf) logger.info(\"Finished.\") else: do_extract(args.incdf,", "int(to_go), int(total/elapsed/1000))) logger.info(\"Extraction finished.\") outdata.close() def copy_data(cdfin, cdfout, timeidx, node_ids,", "= ('time','siglay','node') if attr == InputAttr.ALL else ('time','node') output.createVariable(out_name, 'f4',", "> 1 else Dataset(exist_cdfs[0]) node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file) logger.info(\"Initializing output", "import Dataset, MFDataset import geopandas as gpd import numpy as", "merged = None for i,shp in enumerate(shps): df = gpd.read_file(shp)", "values of lists here, see # https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4, verbose=False, masked_nodes_file=os.path.join(script_home,", "time.perf_counter() times_ct = outdata.dimensions['time'].size for cdfchunk in chunks(exist_cdfs, args.chunk_size): c", "finished.\") outdata.close() def copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs): args =", "-1 out_name += \"_bottom\" # TODO add \"photic\" case which", "in {1} shapefiles\".format( len(merged), len(shps))) masked_nodes = np.loadtxt(masked) merged =", "logger = logging.getLogger(__name__) def get_node_ids(shps, masked): merged = None for", "{ \"all\": InputAttr.ALL, \"bottom\": InputAttr.BOTTOM } # Expands an input", "attribute # describing the vertical extraction method. 
def colon_meta(string): var,", "= MFDataset(cdfchunk) if len(cdfchunk) > 1 else Dataset(cdfchunk[0]) chunk_times =", "DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949, -0.30326778, -0.40915567, -0.52520996, -0.65060186,", "workaround if not args.input_vars: args.input_vars = [(\"DOXG\",InputAttr.BOTTOM)] if not args.domain_node_shapefiles:", "read/write cache in a temporary directory\") # Cannot include default", "'zeta' output variable for var, attr in args.input_vars: if attr", "output files\") parser.add_argument(\"incdf\", nargs=\"+\", help=\"each input CDF file\") parser.add_argument(\"outcdf\", help=\"the", "for only a few netCDF files at a time indata.close()", "colon_meta(string): var, attr = string.split(':', 2) return (var, attr_strings[attr]) def", "indata, node_ids, **vars(args)) outdata['time'][:] = indata['time'][:] / 3600 / 24", "in args.input_vars: out_name = args.outprefix + var if attr ==", "a blocking approach where MFDatasets are # created for only", "import logging from enum import Enum from argparse import ArgumentParser,", "copy_data(c, outdata, i, node_ids, **vars(args)) i += chunk_times c.close() elapsed", "parser.parse_args() # This is the workaround if not args.input_vars: args.input_vars", "args.input_vars: args.input_vars = [(\"DOXG\",InputAttr.BOTTOM)] if not args.domain_node_shapefiles: args.domain_node_shapefiles = [os.path.join(script_home,", "= cdfin['zeta'][:, node_ids - 1] cdfout['zeta'][timeidx:timeidx + times_ct, :] =", "def colon_meta(string): var, attr = string.split(':', 2) return (var, attr_strings[attr])", "logger.info(\"Caching output file...\") shutil.copy(args.outcdf, output_cdf) do_extract(exist_cdfs, output_cdf, **vars(args)) # Copy", "init_output(output_cdf, indata, node_ids, **vars(args)) outdata['time'][:] = indata['time'][:] / 3600 /", "already present # - include the 'siglay' dimension on the", "-0.52520996, -0.65060186, -0.78467834, -0.9269075 ] def init_output(output_cdf, indata, nodes, 
**kwargs):", "\"all\": # - add the 'siglay' dimension to the output", "os.path.exists(args.outcdf): logger.info(\"Caching output file...\") shutil.copy(args.outcdf, output_cdf) do_extract(exist_cdfs, output_cdf, **vars(args)) #", "def copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs): args = Namespace(**kwargs) times_ct", "cdfout.variables: alldata['zeta'] = cdfin['zeta'][:, node_ids - 1] cdfout['zeta'][timeidx:timeidx + times_ct,", "the extracted variables in the output CDF\") parser.add_argument(\"-d\", dest=\"domain_node_shapefiles\", action=\"append\",", "\"all\": InputAttr.ALL, \"bottom\": InputAttr.BOTTOM } # Expands an input variable", "at a time indata.close() i = 0 total = 0", "= elapsed * (times_ct / i - 1) total +=", "describing the vertical extraction method. def colon_meta(string): var, attr =", "('time','node')) break return output def append_output(output_cdf): return Dataset(output_cdf, 'a') def", "**vars(args)) outdata['time'][:] = indata['time'][:] / 3600 / 24 else: outdata", "type=int, dest=\"chunk_size\", help=\"Process this many CDF files at once\") parser.add_argument(\"--cache\",", "logger.info(\"Beginning extraction...\") start_time = time.perf_counter() times_ct = outdata.dimensions['time'].size for cdfchunk", "go, {4}KBps)\".format(i, times_ct, int(elapsed), int(to_go), int(total/elapsed/1000))) logger.info(\"Extraction finished.\") outdata.close() def", "# Expands an input variable argument into a variable name", "help=\"each input CDF file\") parser.add_argument(\"outcdf\", help=\"the output CDF file (created", "chunks(exist_cdfs, args.chunk_size): c = MFDataset(cdfchunk) if len(cdfchunk) > 1 else", "time import os import tempfile import shutil import logging from", "output.createVariable(out_name, 'f4', dims) # Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def chunks(lst, n):", "nodeDim = output.createDimension('node', len(nodes)) nodeVar = 
output.createVariable('node', \"i4\", ('node',)) output['node'][:]", "[] logger.info(\"Caching input files...\") for infile in args.incdf: newpath =", "n-sized chunks from lst.\"\"\" for i in range(0, len(lst), n):", "c.close() elapsed = (time.perf_counter() - start_time) to_go = elapsed *", "args.domain_node_shapefiles: args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)] logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING)", "cdfin['zeta'][:, node_ids - 1] cdfout['zeta'][timeidx:timeidx + times_ct, :] = alldata['zeta']", "shutil.copy(output_cdf, args.outcdf) logger.info(\"Finished.\") else: do_extract(args.incdf, args.outcdf, **vars(args)) def do_extract(exist_cdfs, output_cdf,", "= 0 BOTTOM = 1 # TODO add \"photic\" for", "df = gpd.read_file(shp) df.set_index('node_id', inplace=True) logger.debug(\"Shapefile {0} has {1} nodes\".format(shp,", "where MFDatasets are # created for only a few netCDF", "indata.variables else DEFAULT_SIGLAYERS output.createDimension('siglay', len(siglayers)) output.createVariable('siglay', 'f4', ('siglay',)) output['siglay'][:] =", "if 'zeta' in indata.variables: output.createVariable('zeta', 'f4', ('time','node')) break return output", "to use the entire MFDataset don't seem to scale well.", "# - include the 'siglay' dimension on the output variable", "nodes text file\") parser.add_argument(\"--invar\", dest=\"input_vars\", type=colon_meta, action=\"append\", help=\"Extract the values", "different output variable\") parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", help=\"Print progress messages", "os.path.join(tmpdir, os.path.basename(args.outcdf)) if os.path.exists(args.outcdf): logger.info(\"Caching output file...\") shutil.copy(args.outcdf, output_cdf) do_extract(exist_cdfs,", "cdfout[out_name][timeidx:timeidx+times_ct,:] = data alldata[out_name] = data return alldata if __name__", "merged.union(df.index) logger.debug(\"get_node_ids found {0} nodes in 
{1} shapefiles\".format( len(merged), len(shps)))", "= logging.getLogger(__name__) def get_node_ids(shps, masked): merged = None for i,shp", "len(shps))) masked_nodes = np.loadtxt(masked) merged = merged.difference(masked_nodes) logger.debug(\"{0} nodes left", "**vars(args)) def do_extract(exist_cdfs, output_cdf, **kwargs): args = Namespace(**kwargs) logger.info(\"Determining scope", "None for i,shp in enumerate(shps): df = gpd.read_file(shp) df.set_index('node_id', inplace=True)", "do_extract(exist_cdfs, output_cdf, **vars(args)) # Copy the resulting output CDF back", "args.cache: with tempfile.TemporaryDirectory() as tmpdir: exist_cdfs = [] logger.info(\"Caching input", "= output.createVariable('time', \"f4\", ('time',)) # Iterate over all output variables", "os.path.join(tmpdir, os.path.basename(infile)) shutil.copy(infile, newpath) exist_cdfs.append(newpath) output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf)) if", "it's needed if 'zeta' in cdfout.variables: alldata['zeta'] = cdfin['zeta'][:, node_ids", "+ var if attr == InputAttr.ALL: slc = slice(None) elif", "'f4', ('time','node')) break return output def append_output(output_cdf): return Dataset(output_cdf, 'a')", "from netCDF4 import Dataset, MFDataset import geopandas as gpd import", "attr in args.input_vars: out_name = args.outprefix + var if attr", "output_cdf, **vars(args)) # Copy the resulting output CDF back logger.info(\"Saving", "node_ids, **kwargs): args = Namespace(**kwargs) times_ct = len(cdfin.dimensions['time']) alldata =", "\"photic\" case which will look rather different data = cdfin[var][:,", "= output.createVariable('node', \"i4\", ('node',)) output['node'][:] = nodes timeVar = output.createVariable('time',", "default values of lists here, see # https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4, verbose=False,", "files\") parser.add_argument(\"incdf\", nargs=\"+\", help=\"each input CDF file\") parser.add_argument(\"outcdf\", help=\"the output", "import numpy 
as np domain_nodes_shp = \"gis/ssm domain nodes.shp\" masked_nodes_txt", "parser.add_argument(\"-c\", \"--chunk-size\", type=int, dest=\"chunk_size\", help=\"Process this many CDF files at", "argument into a variable name and an attribute # describing", "import tempfile import shutil import logging from enum import Enum", "zeta if it's needed if 'zeta' in cdfout.variables: alldata['zeta'] =", "class InputAttr(Enum): ALL = 0 BOTTOM = 1 # TODO", "variable\") parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", help=\"Print progress messages during the", "cdfchunk in chunks(exist_cdfs, args.chunk_size): c = MFDataset(cdfchunk) if len(cdfchunk) >", "for k,d in data.items()]) logger.info(\"{0}/{1} ({2}s elapsed, {3}s to go,", "merged = merged.union(df.index) logger.debug(\"get_node_ids found {0} nodes in {1} shapefiles\".format(", "a few netCDF files at a time indata.close() i =", "0 logger.info(\"Beginning extraction...\") start_time = time.perf_counter() times_ct = outdata.dimensions['time'].size for", "os.path.exists(output_cdf): outdata = init_output(output_cdf, indata, node_ids, **vars(args)) outdata['time'][:] = indata['time'][:]", "alldata['zeta'] = cdfin['zeta'][:, node_ids - 1] cdfout['zeta'][timeidx:timeidx + times_ct, :]", "= merged.union(df.index) logger.debug(\"get_node_ids found {0} nodes in {1} shapefiles\".format( len(merged),", "logger.info(\"Caching input files...\") for infile in args.incdf: newpath = os.path.join(tmpdir,", "of work...\") indata = MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else", "# TODO add \"photic\" for the photic zone attr_strings =", "slc, node_ids - 1] logger.debug(\"data is shape \" + str(data.shape))", "Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def chunks(lst, n): \"\"\"Yield successive n-sized chunks", "/ 24 else: outdata = append_output(output_cdf) init_output_vars(outdata, **vars(args)) # Attempts", "input files...\") 
for infile in args.incdf: newpath = os.path.join(tmpdir, os.path.basename(infile))", "== InputAttr.ALL else ('time','node') output.createVariable(out_name, 'f4', dims) # Gotten from", "= merged.difference(masked_nodes) logger.debug(\"{0} nodes left after masking\".format(len(merged))) return merged.to_numpy() DEFAULT_SIGLAYERS", "files at a time indata.close() i = 0 total =", "variable name and an attribute # describing the vertical extraction", "-0.9269075 ] def init_output(output_cdf, indata, nodes, **kwargs): args = Namespace(**kwargs)", "if it's not already present # - include the 'siglay'", "back logger.info(\"Saving output file...\") shutil.copy(output_cdf, args.outcdf) logger.info(\"Finished.\") else: do_extract(args.incdf, args.outcdf,", "{0} has {1} nodes\".format(shp, len(df))) if merged is None: merged", "masked_nodes = np.loadtxt(masked) merged = merged.difference(masked_nodes) logger.debug(\"{0} nodes left after", "left after masking\".format(len(merged))) return merged.to_numpy() DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974,", "parser.set_defaults(chunk_size=4, verbose=False, masked_nodes_file=os.path.join(script_home, masked_nodes_txt)) args = parser.parse_args() # This is", "= parser.parse_args() # This is the workaround if not args.input_vars:", "don't seem to scale well. 
# Instead, I'm resorting to", "* d.itemsize for k,d in data.items()]) logger.info(\"{0}/{1} ({2}s elapsed, {3}s", "in indata.variables else DEFAULT_SIGLAYERS output.createDimension('siglay', len(siglayers)) output.createVariable('siglay', 'f4', ('siglay',)) output['siglay'][:]", "+= chunk_times c.close() elapsed = (time.perf_counter() - start_time) to_go =", "dest=\"input_vars\", type=colon_meta, action=\"append\", help=\"Extract the values of a different output", "output variables # If an extraction attribute is \"all\": #", "= None for i,shp in enumerate(shps): df = gpd.read_file(shp) df.set_index('node_id',", "timeidx, node_ids, **kwargs): args = Namespace(**kwargs) times_ct = len(cdfin.dimensions['time']) alldata", "== InputAttr.BOTTOM: out_name += \"_bottom\" # TODO handle photic case", "file...\") shutil.copy(output_cdf, args.outcdf) logger.info(\"Finished.\") else: do_extract(args.incdf, args.outcdf, **vars(args)) def do_extract(exist_cdfs,", "('time','siglay','node') if attr == InputAttr.ALL else ('time','node') output.createVariable(out_name, 'f4', dims)", "logger.debug(\"Shapefile {0} has {1} nodes\".format(shp, len(df))) if merged is None:", "parser.add_argument(\"incdf\", nargs=\"+\", help=\"each input CDF file\") parser.add_argument(\"outcdf\", help=\"the output CDF", "from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def chunks(lst, n): \"\"\"Yield successive n-sized chunks from", "doesn't exist)\") parser.add_argument(\"outprefix\", help=\"a prefix for the extracted variables in", "the entire MFDataset don't seem to scale well. 
# Instead,", "out_name += \"_bottom\" # TODO handle photic case dims =", "only a few netCDF files at a time indata.close() i", "add a 'zeta' output variable for var, attr in args.input_vars:", "else Dataset(exist_cdfs[0]) node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file) logger.info(\"Initializing output file...\") if", "I'm resorting to a blocking approach where MFDatasets are #", "dest=\"verbose\", help=\"Print progress messages during the extraction\") parser.add_argument(\"-c\", \"--chunk-size\", type=int,", "times_ct = outdata.dimensions['time'].size for cdfchunk in chunks(exist_cdfs, args.chunk_size): c =", "= copy_data(c, outdata, i, node_ids, **vars(args)) i += chunk_times c.close()", "node_ids - 1] cdfout['zeta'][timeidx:timeidx + times_ct, :] = alldata['zeta'] for", "here, see # https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4, verbose=False, masked_nodes_file=os.path.join(script_home, masked_nodes_txt)) args =", "the 'siglay' dimension on the output variable # - add", "attr == InputAttr.ALL: slc = slice(None) elif attr == InputAttr.BOTTOM:", "if not args.domain_node_shapefiles: args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)] logging.basicConfig(level=logging.INFO if args.verbose", "into a variable name and an attribute # describing the", "file (created if it doesn't exist)\") parser.add_argument(\"outprefix\", help=\"a prefix for", "exist_cdfs = [] logger.info(\"Caching input files...\") for infile in args.incdf:", "attr in args.input_vars: if attr == InputAttr.ALL: siglayers = indata['siglay'][:]", "get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file) logger.info(\"Initializing output file...\") if not os.path.exists(output_cdf): outdata =", "dest=\"cache\", action=\"store_true\", help=\"Use a read/write cache in a temporary directory\")", "'siglay' dimension on the output variable # - add a", "times_ct, int(elapsed), int(to_go), int(total/elapsed/1000))) 
logger.info(\"Extraction finished.\") outdata.close() def copy_data(cdfin, cdfout,", "args = Namespace(**kwargs) output = Dataset(output_cdf, \"w\") timeDim = output.createDimension('time',", "parser.add_argument(\"outcdf\", help=\"the output CDF file (created if it doesn't exist)\")", "1 else Dataset(cdfchunk[0]) chunk_times = len(c.dimensions['time']) data = copy_data(c, outdata,", "break return output def append_output(output_cdf): return Dataset(output_cdf, 'a') def init_output_vars(output,", "output CDF file (created if it doesn't exist)\") parser.add_argument(\"outprefix\", help=\"a", "from argparse import ArgumentParser, Namespace, FileType from netCDF4 import Dataset,", "chunks(lst, n): \"\"\"Yield successive n-sized chunks from lst.\"\"\" for i", "start_time = time.perf_counter() times_ct = outdata.dimensions['time'].size for cdfchunk in chunks(exist_cdfs,", "* (times_ct / i - 1) total += np.sum([d.size *", "args.input_vars: out_name = args.outprefix + var if attr == InputAttr.BOTTOM:", "-0.12687974, -0.20864949, -0.30326778, -0.40915567, -0.52520996, -0.65060186, -0.78467834, -0.9269075 ] def", "str(data.shape)) if attr == InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data else: cdfout[out_name][timeidx:timeidx+times_ct,:]", "output CDF back logger.info(\"Saving output file...\") shutil.copy(output_cdf, args.outcdf) logger.info(\"Finished.\") else:", "= Namespace(**kwargs) logger.info(\"Determining scope of work...\") indata = MFDataset(exist_cdfs) if", "at once\") parser.add_argument(\"--cache\", dest=\"cache\", action=\"store_true\", help=\"Use a read/write cache in", "over all output variables # If an extraction attribute is", "Namespace(**kwargs) output = Dataset(output_cdf, \"w\") timeDim = output.createDimension('time', len(indata.dimensions['time'])) nodeDim", "attr = string.split(':', 2) return (var, attr_strings[attr]) def main(): script_home", "cdfout['zeta'][timeidx:timeidx + times_ct, :] = alldata['zeta'] for var, attr in", 
"= Namespace(**kwargs) times_ct = len(cdfin.dimensions['time']) alldata = {} # Copy", "'siglay' dimension to the output if it's not already present", "\"_bottom\" # TODO add \"photic\" case which will look rather", "#!/usr/bin/env python3 import time import os import tempfile import shutil", "if attr == InputAttr.ALL: siglayers = indata['siglay'][:] if 'siglay' in", "'siglay' in indata.variables else DEFAULT_SIGLAYERS output.createDimension('siglay', len(siglayers)) output.createVariable('siglay', 'f4', ('siglay',))", "nargs=\"+\", help=\"each input CDF file\") parser.add_argument(\"outcdf\", help=\"the output CDF file", "shutil import logging from enum import Enum from argparse import", "masked): merged = None for i,shp in enumerate(shps): df =", "If an extraction attribute is \"all\": # - add the", "if len(cdfchunk) > 1 else Dataset(cdfchunk[0]) chunk_times = len(c.dimensions['time']) data", "nodes\".format(shp, len(df))) if merged is None: merged = df.index else:", "prefix for the extracted variables in the output CDF\") parser.add_argument(\"-d\",", "lst[i:i+n] class InputAttr(Enum): ALL = 0 BOTTOM = 1 #", "args.input_vars = [(\"DOXG\",InputAttr.BOTTOM)] if not args.domain_node_shapefiles: args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)]", "outdata = init_output(output_cdf, indata, node_ids, **vars(args)) outdata['time'][:] = indata['time'][:] /", "include default values of lists here, see # https://bugs.python.org/issue16399 parser.set_defaults(chunk_size=4,", "will look rather different data = cdfin[var][:, slc, node_ids -", "np.loadtxt(masked) merged = merged.difference(masked_nodes) logger.debug(\"{0} nodes left after masking\".format(len(merged))) return", "def get_node_ids(shps, masked): merged = None for i,shp in enumerate(shps):", "present # - include the 'siglay' dimension on the output", "look rather different data = cdfin[var][:, slc, node_ids - 1]", "('time',)) # Iterate over all output variables # If an", "**vars(args)) i 
+= chunk_times c.close() elapsed = (time.perf_counter() - start_time)", "output.createVariable('time', \"f4\", ('time',)) # Iterate over all output variables #", "args = Namespace(**kwargs) logger.info(\"Determining scope of work...\") indata = MFDataset(exist_cdfs)", "logger.info(\"Determining scope of work...\") indata = MFDataset(exist_cdfs) if len(exist_cdfs) >", "from lst.\"\"\" for i in range(0, len(lst), n): yield lst[i:i+n]", "attr == InputAttr.ALL: siglayers = indata['siglay'][:] if 'siglay' in indata.variables", "var, attr = string.split(':', 2) return (var, attr_strings[attr]) def main():", "# If an extraction attribute is \"all\": # - add", "== InputAttr.ALL: slc = slice(None) elif attr == InputAttr.BOTTOM: slc", "3600 / 24 else: outdata = append_output(output_cdf) init_output_vars(outdata, **vars(args)) #", "netCDF files at a time indata.close() i = 0 total", "= output.createDimension('time', len(indata.dimensions['time'])) nodeDim = output.createDimension('node', len(nodes)) nodeVar = output.createVariable('node',", "Dataset(output_cdf, 'a') def init_output_vars(output, **kwargs): args = Namespace(**kwargs) for var,", "do_extract(args.incdf, args.outcdf, **vars(args)) def do_extract(exist_cdfs, output_cdf, **kwargs): args = Namespace(**kwargs)", "= df.index else: merged = merged.union(df.index) logger.debug(\"get_node_ids found {0} nodes", "- add the 'siglay' dimension to the output if it's", "\"photic\" for the photic zone attr_strings = { \"all\": InputAttr.ALL,", "dest=\"domain_node_shapefiles\", action=\"append\", help=\"Specify a domain node shapefile\") parser.add_argument(\"-m\", dest=\"masked_nodes_file\", type=FileType('r'),", "output.createVariable('siglay', 'f4', ('siglay',)) output['siglay'][:] = siglayers if 'zeta' in indata.variables:", "domain node shapefile\") parser.add_argument(\"-m\", dest=\"masked_nodes_file\", type=FileType('r'), help=\"Specify a different masked", "messages during the extraction\") parser.add_argument(\"-c\", 
\"--chunk-size\", type=int, dest=\"chunk_size\", help=\"Process this", "if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0]) node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file)", "elif attr == InputAttr.BOTTOM: slc = -1 out_name += \"_bottom\"", "\"gis/ssm domain nodes.shp\" masked_nodes_txt = \"gis/masked nodes.txt\" logger = logging.getLogger(__name__)", "exist)\") parser.add_argument(\"outprefix\", help=\"a prefix for the extracted variables in the", "outdata, i, node_ids, **vars(args)) i += chunk_times c.close() elapsed =", "resulting output CDF back logger.info(\"Saving output file...\") shutil.copy(output_cdf, args.outcdf) logger.info(\"Finished.\")", "data else: cdfout[out_name][timeidx:timeidx+times_ct,:] = data alldata[out_name] = data return alldata", "1] logger.debug(\"data is shape \" + str(data.shape)) if attr ==", "infile in args.incdf: newpath = os.path.join(tmpdir, os.path.basename(infile)) shutil.copy(infile, newpath) exist_cdfs.append(newpath)", "# Attempts to use the entire MFDataset don't seem to", "else: cdfout[out_name][timeidx:timeidx+times_ct,:] = data alldata[out_name] = data return alldata if", "a different masked nodes text file\") parser.add_argument(\"--invar\", dest=\"input_vars\", type=colon_meta, action=\"append\",", "# Copy the resulting output CDF back logger.info(\"Saving output file...\")", "Dataset(exist_cdfs[0]) node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file) logger.info(\"Initializing output file...\") if not", "def append_output(output_cdf): return Dataset(output_cdf, 'a') def init_output_vars(output, **kwargs): args =", "= alldata['zeta'] for var, attr in args.input_vars: out_name = args.outprefix", "merged = merged.difference(masked_nodes) logger.debug(\"{0} nodes left after masking\".format(len(merged))) return merged.to_numpy()", "range(0, len(lst), n): yield lst[i:i+n] class InputAttr(Enum): ALL = 0", "extracted variables in the output CDF\") parser.add_argument(\"-d\", 
dest=\"domain_node_shapefiles\", action=\"append\", help=\"Specify", "is None: merged = df.index else: merged = merged.union(df.index) logger.debug(\"get_node_ids", "np domain_nodes_shp = \"gis/ssm domain nodes.shp\" masked_nodes_txt = \"gis/masked nodes.txt\"", "for var, attr in args.input_vars: if attr == InputAttr.ALL: siglayers", "case dims = ('time','siglay','node') if attr == InputAttr.ALL else ('time','node')", "to_go = elapsed * (times_ct / i - 1) total", "help=\"Process this many CDF files at once\") parser.add_argument(\"--cache\", dest=\"cache\", action=\"store_true\",", "a 'zeta' output variable for var, attr in args.input_vars: if", "logger.info(\"{0}/{1} ({2}s elapsed, {3}s to go, {4}KBps)\".format(i, times_ct, int(elapsed), int(to_go),", "+ var if attr == InputAttr.BOTTOM: out_name += \"_bottom\" #", "= [(\"DOXG\",InputAttr.BOTTOM)] if not args.domain_node_shapefiles: args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)] logging.basicConfig(level=logging.INFO", "Instead, I'm resorting to a blocking approach where MFDatasets are", "a time indata.close() i = 0 total = 0 logger.info(\"Beginning", "return merged.to_numpy() DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949, -0.30326778, -0.40915567,", "output file...\") shutil.copy(output_cdf, args.outcdf) logger.info(\"Finished.\") else: do_extract(args.incdf, args.outcdf, **vars(args)) def", "output = Dataset(output_cdf, \"w\") timeDim = output.createDimension('time', len(indata.dimensions['time'])) nodeDim =", "add \"photic\" for the photic zone attr_strings = { \"all\":", "netcdf output files\") parser.add_argument(\"incdf\", nargs=\"+\", help=\"each input CDF file\") parser.add_argument(\"outcdf\",", "string.split(':', 2) return (var, attr_strings[attr]) def main(): script_home = os.path.dirname(os.path.realpath(__file__))", "\"--verbose\", action=\"store_true\", dest=\"verbose\", help=\"Print progress messages during the extraction\") 
parser.add_argument(\"-c\",", "-0.20864949, -0.30326778, -0.40915567, -0.52520996, -0.65060186, -0.78467834, -0.9269075 ] def init_output(output_cdf,", "'f4', ('siglay',)) output['siglay'][:] = siglayers if 'zeta' in indata.variables: output.createVariable('zeta',", "def init_output(output_cdf, indata, nodes, **kwargs): args = Namespace(**kwargs) output =", "to scale well. # Instead, I'm resorting to a blocking", "parser.add_argument(\"--cache\", dest=\"cache\", action=\"store_true\", help=\"Use a read/write cache in a temporary", "on the output variable # - add a 'zeta' output", "dest=\"chunk_size\", help=\"Process this many CDF files at once\") parser.add_argument(\"--cache\", dest=\"cache\",", "import Enum from argparse import ArgumentParser, Namespace, FileType from netCDF4", "output variable # - add a 'zeta' output variable for", "parser.add_argument(\"-m\", dest=\"masked_nodes_file\", type=FileType('r'), help=\"Specify a different masked nodes text file\")", "if attr == InputAttr.ALL: slc = slice(None) elif attr ==", "args = Namespace(**kwargs) times_ct = len(cdfin.dimensions['time']) alldata = {} #", "1 # TODO add \"photic\" for the photic zone attr_strings", "'zeta' in cdfout.variables: alldata['zeta'] = cdfin['zeta'][:, node_ids - 1] cdfout['zeta'][timeidx:timeidx", "+= \"_bottom\" # TODO handle photic case dims = ('time','siglay','node')", "TODO handle photic case dims = ('time','siglay','node') if attr ==", "indata['time'][:] / 3600 / 24 else: outdata = append_output(output_cdf) init_output_vars(outdata,", "parser = ArgumentParser(description=\"Extract data from SSM netcdf output files\") parser.add_argument(\"incdf\",", "to go, {4}KBps)\".format(i, times_ct, int(elapsed), int(to_go), int(total/elapsed/1000))) logger.info(\"Extraction finished.\") outdata.close()", "- 1] cdfout['zeta'][timeidx:timeidx + times_ct, :] = alldata['zeta'] for var,", "args.outcdf, **vars(args)) def do_extract(exist_cdfs, output_cdf, **kwargs): args = Namespace(**kwargs) 
logger.info(\"Determining", "# - add the 'siglay' dimension to the output if", "This is the workaround if not args.input_vars: args.input_vars = [(\"DOXG\",InputAttr.BOTTOM)]", "\"bottom\": InputAttr.BOTTOM } # Expands an input variable argument into", "('node',)) output['node'][:] = nodes timeVar = output.createVariable('time', \"f4\", ('time',)) #", "import shutil import logging from enum import Enum from argparse", "indata.variables: output.createVariable('zeta', 'f4', ('time','node')) break return output def append_output(output_cdf): return", "shutil.copy(infile, newpath) exist_cdfs.append(newpath) output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf)) if os.path.exists(args.outcdf): logger.info(\"Caching", "1) total += np.sum([d.size * d.itemsize for k,d in data.items()])", "help=\"Extract the values of a different output variable\") parser.add_argument(\"-v\", \"--verbose\",", "action=\"store_true\", dest=\"verbose\", help=\"Print progress messages during the extraction\") parser.add_argument(\"-c\", \"--chunk-size\",", "else logging.WARNING) #logger.setLevel(logging.DEBUG) if args.cache: with tempfile.TemporaryDirectory() as tmpdir: exist_cdfs", "the resulting output CDF back logger.info(\"Saving output file...\") shutil.copy(output_cdf, args.outcdf)", "dims) # Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks def chunks(lst, n): \"\"\"Yield successive", "type=colon_meta, action=\"append\", help=\"Extract the values of a different output variable\")", "type=FileType('r'), help=\"Specify a different masked nodes text file\") parser.add_argument(\"--invar\", dest=\"input_vars\",", "Copy the resulting output CDF back logger.info(\"Saving output file...\") shutil.copy(output_cdf,", "Namespace(**kwargs) logger.info(\"Determining scope of work...\") indata = MFDataset(exist_cdfs) if len(exist_cdfs)", "cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data else: 
cdfout[out_name][timeidx:timeidx+times_ct,:] = data alldata[out_name] = data", "2) return (var, attr_strings[attr]) def main(): script_home = os.path.dirname(os.path.realpath(__file__)) parser", "int(total/elapsed/1000))) logger.info(\"Extraction finished.\") outdata.close() def copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs):", "= 1 # TODO add \"photic\" for the photic zone", "# This is the workaround if not args.input_vars: args.input_vars =", "add the 'siglay' dimension to the output if it's not", "= siglayers if 'zeta' in indata.variables: output.createVariable('zeta', 'f4', ('time','node')) break", "for infile in args.incdf: newpath = os.path.join(tmpdir, os.path.basename(infile)) shutil.copy(infile, newpath)", "= time.perf_counter() times_ct = outdata.dimensions['time'].size for cdfchunk in chunks(exist_cdfs, args.chunk_size):", "if 'siglay' in indata.variables else DEFAULT_SIGLAYERS output.createDimension('siglay', len(siglayers)) output.createVariable('siglay', 'f4',", "+ times_ct, :] = alldata['zeta'] for var, attr in args.input_vars:", "MFDataset(cdfchunk) if len(cdfchunk) > 1 else Dataset(cdfchunk[0]) chunk_times = len(c.dimensions['time'])", "= len(cdfin.dimensions['time']) alldata = {} # Copy zeta if it's", "logger.debug(\"get_node_ids found {0} nodes in {1} shapefiles\".format( len(merged), len(shps))) masked_nodes", "Namespace, FileType from netCDF4 import Dataset, MFDataset import geopandas as", "output['siglay'][:] = siglayers if 'zeta' in indata.variables: output.createVariable('zeta', 'f4', ('time','node'))", "times_ct, :] = alldata['zeta'] for var, attr in args.input_vars: out_name", "many CDF files at once\") parser.add_argument(\"--cache\", dest=\"cache\", action=\"store_true\", help=\"Use a", "== InputAttr.BOTTOM: slc = -1 out_name += \"_bottom\" # TODO", "attr == InputAttr.ALL else ('time','node') output.createVariable(out_name, 'f4', dims) # Gotten", "/ 3600 / 24 else: outdata = append_output(output_cdf) init_output_vars(outdata, 
**vars(args))", "var if attr == InputAttr.ALL: slc = slice(None) elif attr", "if 'zeta' in cdfout.variables: alldata['zeta'] = cdfin['zeta'][:, node_ids - 1]", "attr == InputAttr.ALL: cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data else: cdfout[out_name][timeidx:timeidx+times_ct,:] = data", "attr_strings = { \"all\": InputAttr.ALL, \"bottom\": InputAttr.BOTTOM } # Expands", "# Instead, I'm resorting to a blocking approach where MFDatasets", "entire MFDataset don't seem to scale well. # Instead, I'm", "work...\") indata = MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0])", "alldata[out_name] = data return alldata if __name__ == \"__main__\": main()", "Dataset, MFDataset import geopandas as gpd import numpy as np", "CDF files at once\") parser.add_argument(\"--cache\", dest=\"cache\", action=\"store_true\", help=\"Use a read/write", "resorting to a blocking approach where MFDatasets are # created", "outdata['time'][:] = indata['time'][:] / 3600 / 24 else: outdata =", "verbose=False, masked_nodes_file=os.path.join(script_home, masked_nodes_txt)) args = parser.parse_args() # This is the", "variable # - add a 'zeta' output variable for var,", "None: merged = df.index else: merged = merged.union(df.index) logger.debug(\"get_node_ids found" ]
[ "logging import getLogger from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor from libcity.utils import", "evaluate(self, test_data): \"\"\" use model to test data Args: test_data", "result = self.model.run(test_data) batch = {'route': test_data['route'], 'result': result, 'rd_nwk':", "test data Args: test_data \"\"\" result = self.model.run(test_data) batch =", "self.model.run(test_data) batch = {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']} self.evaluator.collect(batch)", "from logging import getLogger from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor from libcity.utils", "Args: test_data \"\"\" result = self.model.run(test_data) batch = {'route': test_data['route'],", "self._logger = getLogger() def evaluate(self, test_data): \"\"\" use model to", "\"\"\" use model to test data Args: test_data \"\"\" result", "def __init__(self, config, model): self.model = model self.config = config", "Args: train_dataloader(torch.Dataloader): Dataloader eval_dataloader(torch.Dataloader): Dataloader \"\"\" pass # do nothing", "class MapMatchingExecutor(AbstractTraditionExecutor): def __init__(self, config, model): self.model = model self.config", "model to test data Args: test_data \"\"\" result = self.model.run(test_data)", "to test data Args: test_data \"\"\" result = self.model.run(test_data) batch", "train(self, train_dataloader, eval_dataloader): \"\"\" 对于传统模型,不需要训练 Args: train_dataloader(torch.Dataloader): Dataloader eval_dataloader(torch.Dataloader): Dataloader", "libcity.utils import get_evaluator class MapMatchingExecutor(AbstractTraditionExecutor): def __init__(self, config, model): self.model", "get_evaluator class MapMatchingExecutor(AbstractTraditionExecutor): def __init__(self, config, model): self.model = model", "getLogger from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor from libcity.utils import get_evaluator class", 
"self.evaluator = get_evaluator(config) self.evaluate_res_dir = './libcity/cache/evaluate_cache' self._logger = getLogger() def", "get_evaluator(config) self.evaluate_res_dir = './libcity/cache/evaluate_cache' self._logger = getLogger() def evaluate(self, test_data):", "= './libcity/cache/evaluate_cache' self._logger = getLogger() def evaluate(self, test_data): \"\"\" use", "= config self.evaluator = get_evaluator(config) self.evaluate_res_dir = './libcity/cache/evaluate_cache' self._logger =", "'./libcity/cache/evaluate_cache' self._logger = getLogger() def evaluate(self, test_data): \"\"\" use model", "= self.model.run(test_data) batch = {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']}", "def train(self, train_dataloader, eval_dataloader): \"\"\" 对于传统模型,不需要训练 Args: train_dataloader(torch.Dataloader): Dataloader eval_dataloader(torch.Dataloader):", "= get_evaluator(config) self.evaluate_res_dir = './libcity/cache/evaluate_cache' self._logger = getLogger() def evaluate(self,", "test_data): \"\"\" use model to test data Args: test_data \"\"\"", "<filename>libcity/executor/map_matching_executor.py from logging import getLogger from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor from", "'result': result, 'rd_nwk': test_data['rd_nwk']} self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir) def train(self, train_dataloader, eval_dataloader):", "model): self.model = model self.config = config self.evaluator = get_evaluator(config)", "\"\"\" 对于传统模型,不需要训练 Args: train_dataloader(torch.Dataloader): Dataloader eval_dataloader(torch.Dataloader): Dataloader \"\"\" pass #", "from libcity.utils import get_evaluator class MapMatchingExecutor(AbstractTraditionExecutor): def __init__(self, config, model):", "对于传统模型,不需要训练 Args: train_dataloader(torch.Dataloader): Dataloader eval_dataloader(torch.Dataloader): Dataloader \"\"\" pass # do", "model self.config = config self.evaluator = 
get_evaluator(config) self.evaluate_res_dir = './libcity/cache/evaluate_cache'", "import AbstractTraditionExecutor from libcity.utils import get_evaluator class MapMatchingExecutor(AbstractTraditionExecutor): def __init__(self,", "use model to test data Args: test_data \"\"\" result =", "test_data \"\"\" result = self.model.run(test_data) batch = {'route': test_data['route'], 'result':", "libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor from libcity.utils import get_evaluator class MapMatchingExecutor(AbstractTraditionExecutor): def", "AbstractTraditionExecutor from libcity.utils import get_evaluator class MapMatchingExecutor(AbstractTraditionExecutor): def __init__(self, config,", "batch = {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']} self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir)", "getLogger() def evaluate(self, test_data): \"\"\" use model to test data", "\"\"\" result = self.model.run(test_data) batch = {'route': test_data['route'], 'result': result,", "self.config = config self.evaluator = get_evaluator(config) self.evaluate_res_dir = './libcity/cache/evaluate_cache' self._logger", "from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor from libcity.utils import get_evaluator class MapMatchingExecutor(AbstractTraditionExecutor):", "def evaluate(self, test_data): \"\"\" use model to test data Args:", "= {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']} self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir) def", "'rd_nwk': test_data['rd_nwk']} self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir) def train(self, train_dataloader, eval_dataloader): \"\"\" 对于传统模型,不需要训练", "{'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']} self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir) def train(self,", 
"self.evaluate_res_dir = './libcity/cache/evaluate_cache' self._logger = getLogger() def evaluate(self, test_data): \"\"\"", "data Args: test_data \"\"\" result = self.model.run(test_data) batch = {'route':", "result, 'rd_nwk': test_data['rd_nwk']} self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir) def train(self, train_dataloader, eval_dataloader): \"\"\"", "= model self.config = config self.evaluator = get_evaluator(config) self.evaluate_res_dir =", "eval_dataloader): \"\"\" 对于传统模型,不需要训练 Args: train_dataloader(torch.Dataloader): Dataloader eval_dataloader(torch.Dataloader): Dataloader \"\"\" pass", "config self.evaluator = get_evaluator(config) self.evaluate_res_dir = './libcity/cache/evaluate_cache' self._logger = getLogger()", "import getLogger from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor from libcity.utils import get_evaluator", "= getLogger() def evaluate(self, test_data): \"\"\" use model to test", "__init__(self, config, model): self.model = model self.config = config self.evaluator", "train_dataloader, eval_dataloader): \"\"\" 对于传统模型,不需要训练 Args: train_dataloader(torch.Dataloader): Dataloader eval_dataloader(torch.Dataloader): Dataloader \"\"\"", "import get_evaluator class MapMatchingExecutor(AbstractTraditionExecutor): def __init__(self, config, model): self.model =", "self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir) def train(self, train_dataloader, eval_dataloader): \"\"\" 对于传统模型,不需要训练 Args: train_dataloader(torch.Dataloader):", "MapMatchingExecutor(AbstractTraditionExecutor): def __init__(self, config, model): self.model = model self.config =", "test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']} self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir) def train(self, train_dataloader,", "test_data['rd_nwk']} self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir) def train(self, 
train_dataloader, eval_dataloader): \"\"\" 对于传统模型,不需要训练 Args:", "self.evaluator.save_result(self.evaluate_res_dir) def train(self, train_dataloader, eval_dataloader): \"\"\" 对于传统模型,不需要训练 Args: train_dataloader(torch.Dataloader): Dataloader", "self.model = model self.config = config self.evaluator = get_evaluator(config) self.evaluate_res_dir", "config, model): self.model = model self.config = config self.evaluator =" ]