content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from . import generative
| [
6738,
764,
1330,
1152,
876,
198
] | 4.166667 | 6 |
from pathlib import Path
with (Path(__file__).parent / "input.txt").open() as puzzle_input_file:
puzzle_input_raw = puzzle_input_file.read()
from collections import defaultdict
limit = 1_000_000
houses = defaultdict(int)
number = int(puzzle_input_raw)
for elf in range(1, number):
for house in range(elf, min(elf * 50 + 1, limit), elf):
houses[house] += 11 * elf
if houses[elf] >= number:
print(elf)
break | [
6738,
3108,
8019,
1330,
10644,
198,
198,
4480,
357,
15235,
7,
834,
7753,
834,
737,
8000,
1220,
366,
15414,
13,
14116,
11074,
9654,
3419,
355,
15027,
62,
15414,
62,
7753,
25,
198,
220,
220,
220,
15027,
62,
15414,
62,
1831,
796,
15027,
... | 2.564972 | 177 |
#
# @lc app=leetcode id=377 lang=python3
#
# [377] Combination Sum IV
#
from typing import List
# @lc code=start
# @lc code=end
solve = Solution().combinationSum4
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
26514,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
26514,
60,
14336,
1883,
5060,
8363,
198,
2,
198,
6738,
19720,
1330,
7343,
198,
198,
2,
2488,
44601,
2438,
28,
9688,
628,
... | 2.65625 | 64 |
from django.utils import six
from nodeconductor.core.models import User, SshPublicKey
from nodeconductor.logging.log import EventLogger, event_logger
from nodeconductor.structure import models
event_logger.register('customer_role', CustomerRoleEventLogger)
event_logger.register('project_role', ProjectRoleEventLogger)
event_logger.register('project_group_role', ProjectGroupRoleEventLogger)
event_logger.register('project_group_membership', ProjectGroupMembershipEventLogger)
event_logger.register('user_organization', UserOrganizationEventLogger)
event_logger.register('customer', CustomerEventLogger)
event_logger.register('project', ProjectEventLogger)
event_logger.register('project_group', ProjectGroupEventLogger)
event_logger.register('balance', BalanceEventLogger)
event_logger.register('resource', ResourceEventLogger)
event_logger.register('service_settings', ServiceSettingsEventLogger)
event_logger.register('service_project_link', ServiceProjectLinkEventLogger)
| [
6738,
42625,
14208,
13,
26791,
1330,
2237,
198,
198,
6738,
18666,
721,
40990,
13,
7295,
13,
27530,
1330,
11787,
11,
311,
1477,
15202,
9218,
198,
6738,
18666,
721,
40990,
13,
6404,
2667,
13,
6404,
1330,
8558,
11187,
1362,
11,
1785,
62,
... | 3.539286 | 280 |
import collections | [
11748,
17268
] | 9 | 2 |
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.keras.layers import (
Activation,
Conv2D,
Dense,
Flatten,
add,
BatchNormalization,
)
from tensorflow.compat.v1.keras.regularizers import l2
def residual_block(
inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation="relu",
batch_normalization=True,
conv_first=True,
):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4),
activation=None,
)
conv2 = Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4),
activation="linear",
)
x = conv(inputs)
x = BatchNormalization()(x)
x = Activation(activation)(x)
x = conv2(x)
x = add([inputs, x])
x = BatchNormalization()(x)
x = Activation(activation)(x)
return x
| [
11748,
11192,
273,
11125,
13,
5589,
265,
13,
85,
16,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
5589,
265,
13,
85,
16,
13,
6122,
292,
13,
75,
6962,
1330,
357,
198,
220,
220,
220,
13144,
341,
11,
198,
220,
220,
220,
34872,
17,
... | 2.318621 | 725 |
# pylint: disable=wildcard-import, unused-wildcard-import
from .backbone import *
from .deepten import *
from .sseg import *
__all__ = ['model_list', 'get_model']
models = {
# resnet
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
# resnest
'resnest50': resnest50,
'resnest101': resnest101,
'resnest200': resnest200,
'resnest269': resnest269,
# resnet other variants
'resnet50s': resnet50s,
'resnet101s': resnet101s,
'resnet152s': resnet152s,
'resnet50d': resnet50d,
'resnet50d_avd': resnet50d_avd,
'resnet50d_avdfast': resnet50d_avdfast,
'resnet50_avgdown': resnet50_avgdown,
'resnet50_avgdown_avdfast': resnet50_avgdown_avdfast,
'resnet50_avgdown_avd': resnet50_avgdown_avd,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x8d': resnext101_32x8d,
# other segmentation backbones
'xception65': xception65,
'wideresnet38': wideresnet38,
'wideresnet50': wideresnet50,
# deepten paper
'deepten_resnet50_minc': get_deepten_resnet50_minc,
# segmentation models
'encnet_resnet101s_coco': get_encnet_resnet101_coco,
'fcn_resnet50s_pcontext': get_fcn_resnet50_pcontext,
'encnet_resnet50s_pcontext': get_encnet_resnet50_pcontext,
'encnet_resnet101s_pcontext': get_encnet_resnet101_pcontext,
'encnet_resnet50s_ade': get_encnet_resnet50_ade,
'encnet_resnet101s_ade': get_encnet_resnet101_ade,
'fcn_resnet50s_ade': get_fcn_resnet50_ade,
'psp_resnet50s_ade': get_psp_resnet50_ade,
'deeplab_resnest50_ade': get_deeplab_resnest50_ade,
'deeplab_resnest101_ade': get_deeplab_resnest101_ade,
}
model_list = list(models.keys())
def get_model(name, **kwargs):
"""Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
root : str, default '~/.encoding/models'
Location for keeping the model parameters.
Returns
-------
Module:
The model.
"""
name = name.lower()
if name not in models:
raise ValueError('%s\n\t%s' % (str(name), '\n\t'.join(sorted(models.keys()))))
net = models[name](**kwargs)
return net
| [
2,
279,
2645,
600,
25,
15560,
28,
21992,
9517,
12,
11748,
11,
21958,
12,
21992,
9517,
12,
11748,
198,
198,
6738,
764,
1891,
15992,
1330,
1635,
198,
6738,
764,
67,
1453,
457,
268,
1330,
1635,
198,
6738,
764,
82,
325,
70,
1330,
1635,
... | 2.181118 | 1,038 |
import pywhatkit as pwt
pwt.sendwhatmsg("+558599011005", "não sei fazer mais nada ;-;", 16,16) | [
11748,
12972,
10919,
15813,
355,
279,
46569,
198,
79,
46569,
13,
21280,
10919,
19662,
7203,
10,
2816,
5332,
2079,
486,
3064,
20,
1600,
366,
77,
28749,
384,
72,
277,
19178,
285,
15152,
299,
4763,
2162,
12,
26,
1600,
1467,
11,
1433,
8
] | 2.238095 | 42 |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes benchmark testing for bert pretraining."""
# pylint: disable=line-too-long
import json
import os
import time
from typing import Optional
from absl import flags
from absl import logging
import tensorflow as tf
from official.benchmark import benchmark_wrappers
from official.benchmark import bert_benchmark_utils
from official.benchmark import owner_utils
from official.common import distribute_utils
from official.nlp.bert import run_pretraining
from official.utils.flags import core as flags_core
# Pretrain masked lanauge modeling accuracy range:
MIN_MLM_ACCURACY = 0.635
MAX_MLM_ACCURACY = 0.645
# Pretrain next sentence prediction accuracy range:
MIN_NSP_ACCURACY = 0.94
MAX_NSP_ACCURACY = 0.96
# Pretrain masked lanauge modeling accuracy range:
MIN_MLM_ACCURACY_GPU = 0.378
MAX_MLM_ACCURACY_GPU = 0.388
# Pretrain next sentence prediction accuracy range:
MIN_NSP_ACCURACY_GPU = 0.82
MAX_NSP_ACCURACY_GPU = 0.84
BERT_PRETRAIN_FILES_SEQ128 = 'gs://mlcompass-data/bert/pretraining_data/seq_128/wikipedia.tfrecord*,gs://mlcompass-data/bert/pretraining_data/seq_128/books.tfrecord*'
BERT_BASE_CONFIG_FILE = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-12_H-768_A-12/bert_config.json'
FLAGS = flags.FLAGS
class BertPretrainAccuracyBenchmark(bert_benchmark_utils.BertBenchmarkBase):
"""Benchmark accuracy tests for BERT Pretraining."""
def __init__(self,
output_dir: Optional[str] = None,
tpu: Optional[str] = None,
**kwargs):
"""Inits BertPretrainAccuracyBenchmark class.
Args:
output_dir: Directory where to output e.g. log files
tpu: TPU name to use in a TPU benchmark.
**kwargs: Additional keyword arguments.
"""
super(BertPretrainAccuracyBenchmark, self).__init__(
output_dir=output_dir, tpu=tpu, **kwargs)
def _get_distribution_strategy(self, ds_type='mirrored'):
"""Gets the distribution strategy.
Args:
ds_type: String, the distribution strategy type to be used. Can be
'mirrored', 'multi_worker_mirrored', 'tpu' and 'off'.
Returns:
A `tf.distribute.DistibutionStrategy` object.
"""
if self.tpu or ds_type == 'tpu':
return distribute_utils.get_distribution_strategy(
distribution_strategy='tpu', tpu_address=self.tpu)
elif ds_type == 'multi_worker_mirrored':
# Configures cluster spec for multi-worker distribution strategy.
_ = distribute_utils.configure_cluster(FLAGS.worker_hosts,
FLAGS.task_index)
return distribute_utils.get_distribution_strategy(
distribution_strategy=ds_type,
num_gpus=FLAGS.num_gpus,
all_reduce_alg=FLAGS.all_reduce_alg)
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self, summary_path: str, report_accuracy: bool,
ds_type: str):
"""Runs and reports the benchmark given the provided configuration."""
distribution = self._get_distribution_strategy(ds_type=ds_type)
logging.info('Flags: %s', flags_core.get_nondefault_flags_as_str())
start_time_sec = time.time()
run_pretraining.run_bert_pretrain(
strategy=distribution, custom_callbacks=self.timer_callback)
wall_time_sec = time.time() - start_time_sec
# For GPU multi-worker, the summary text file is only generated on chief
# (metrics aggregated), so only chief has to report the result.
if tf.io.gfile.exists(summary_path):
with tf.io.gfile.GFile(summary_path, 'rb') as reader:
summary = json.loads(reader.read().decode('utf-8'))
self._report_benchmark(summary, start_time_sec, wall_time_sec,
report_accuracy, ds_type)
@owner_utils.Owner('tf-model-garden')
def benchmark_accuracy_8x8_tpu_bf16_seq128_500k_steps(self):
"""Test bert pretraining with 8x8 TPU for 500k steps."""
# This is used for accuracy test.
self._setup()
self._specify_common_flags()
self._specify_tpu_common_flags()
FLAGS.train_batch_size = 512
FLAGS.num_steps_per_epoch = 500000
FLAGS.num_train_epochs = 1
FLAGS.model_dir = self._get_model_dir(
'benchmark_accuracy_8x8_tpu_bf16_seq128_500k_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Set train_summary_interval to -1 to disable training summary, because
# writing summary to gcs may fail and summaries are not needed for this
# accuracy benchmark test.
FLAGS.train_summary_interval = -1
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=True,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-model-garden')
def benchmark_perf_2x2_tpu_bf16_seq128_10k_steps(self):
"""Test bert pretraining with 2x2 TPU for 10000 steps."""
self._setup()
self._specify_common_flags()
self._specify_tpu_common_flags()
FLAGS.num_steps_per_epoch = 5000
FLAGS.num_train_epochs = 2
FLAGS.train_batch_size = 128
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_2x2_tpu_bf16_seq128_10k_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-model-garden')
def benchmark_perf_2x2_tpu_bf16_seq128_10k_steps_mlir(self):
"""Test bert pretraining with 2x2 TPU with MLIR for 10000 steps."""
self._setup()
self._specify_common_flags()
self._specify_tpu_common_flags()
FLAGS.num_steps_per_epoch = 5000
FLAGS.num_train_epochs = 2
FLAGS.train_batch_size = 128
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_2x2_tpu_bf16_seq128_10k_steps_mlir')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
tf.config.experimental.enable_mlir_bridge()
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-model-garden')
def benchmark_perf_4x4_tpu_bf16_seq128_10k_steps(self):
"""Test bert pretraining with 4x4 TPU for 10000 steps."""
self._setup()
self._specify_common_flags()
self._specify_tpu_common_flags()
FLAGS.train_batch_size = 512
FLAGS.num_steps_per_epoch = 5000
FLAGS.num_train_epochs = 2
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_4x4_tpu_bf16_seq128_10k_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-model-garden')
def benchmark_perf_4x4_tpu_bf16_seq128_10k_steps_mlir(self):
"""Test bert pretraining with 4x4 TPU with MLIR for 10000 steps."""
self._setup()
self._specify_common_flags()
self._specify_tpu_common_flags()
FLAGS.train_batch_size = 512
FLAGS.num_steps_per_epoch = 5000
FLAGS.num_train_epochs = 2
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_4x4_tpu_bf16_seq128_10k_steps_mlir')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
tf.config.experimental.enable_mlir_bridge()
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-model-garden')
def benchmark_perf_8x8_tpu_bf16_seq128_10k_steps(self):
"""Test bert pretraining with 8x8 TPU for 10000 steps."""
self._setup()
self._specify_common_flags()
self._specify_tpu_common_flags()
FLAGS.train_batch_size = 512
FLAGS.num_steps_per_epoch = 5000
FLAGS.num_train_epochs = 2
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_8x8_tpu_bf16_seq128_10k_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-dist-strat')
def benchmark_accuracy_1x8_gpu_fp16_seq128_15k_steps(self):
"""Test bert pretraining with 8 GPU for 15k steps."""
# This is used for accuracy test.
self._setup()
self._specify_common_flags()
self._specify_gpu_common_flags()
FLAGS.num_gpus = 8
FLAGS.train_batch_size = 96
FLAGS.num_steps_per_epoch = 5000
FLAGS.num_train_epochs = 3
FLAGS.steps_per_loop = 5000
FLAGS.model_dir = self._get_model_dir(
'benchmark_accuracy_1x8_gpu_fp16_seq128_15k_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Set train_summary_interval to -1 to disable training summary, because
# writing summary to gcs may fail and summaries are not needed for this
# accuracy benchmark test.
FLAGS.train_summary_interval = -1
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=True,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-dist-strat')
def benchmark_perf_1x1_gpu_fp16_seq128_200_steps(self):
"""Test bert pretraining with 1 GPU for 200 steps."""
self._setup()
self._specify_common_flags()
self._specify_gpu_common_flags()
FLAGS.num_steps_per_epoch = 200
FLAGS.num_train_epochs = 1
FLAGS.num_gpus = 1
FLAGS.train_batch_size = 12
FLAGS.steps_per_loop = 100
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_1x1_gpu_fp16_seq128_200_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-dist-strat')
def benchmark_perf_1x8_gpu_fp16_seq128_200_steps(self):
"""Test bert pretraining with 8 GPU for 200 steps."""
self._setup()
self._specify_common_flags()
self._specify_gpu_common_flags()
FLAGS.num_steps_per_epoch = 200
FLAGS.num_train_epochs = 1
FLAGS.num_gpus = 8
FLAGS.train_batch_size = 96
FLAGS.steps_per_loop = 100
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_1x8_gpu_fp16_seq128_200_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
class BertPretrainMultiWorkerBenchmark(BertPretrainAccuracyBenchmark):
"""Bert pretrain distributed benchmark tests with multiple workers."""
@owner_utils.Owner('tf-dist-strat')
def benchmark_accuracy_mwms_1x8_gpu_fp16_seq128_15k_steps(self):
"""Test bert pretraining with 8 GPU for 15k steps."""
# This is used for accuracy test.
self._setup()
self._specify_common_flags()
self._specify_gpu_mwms_flags()
FLAGS.train_batch_size = 96
FLAGS.num_steps_per_epoch = 5000
FLAGS.num_train_epochs = 3
FLAGS.steps_per_loop = 5000
FLAGS.model_dir = self._get_model_dir(
'benchmark_accuracy_mwms_1x8_gpu_fp16_seq128_15k_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Set train_summary_interval to -1 to disable training summary, because
# writing summary to gcs may fail and summaries are not needed for this
# accuracy benchmark test.
FLAGS.train_summary_interval = -1
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=True,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-dist-strat')
def benchmark_accuracy_mwms_2x8_gpu_fp16_seq128_15k_steps(self):
"""Test bert pretraining with 2x8 GPU for 15k steps."""
# This is used for accuracy test.
self._setup()
self._specify_common_flags()
self._specify_gpu_mwms_flags()
# ues the same global batch size as accuracy_mwms_1x8 benchmark.
FLAGS.train_batch_size = 96
FLAGS.num_steps_per_epoch = 5000
FLAGS.num_train_epochs = 3
FLAGS.steps_per_loop = 5000
FLAGS.model_dir = self._get_model_dir(
'benchmark_accuracy_mwms_2x8_gpu_fp16_seq128_15k_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Set train_summary_interval to -1 to disable training summary, because
# writing summary to gcs may fail and summaries are not needed for this
# accuracy benchmark test.
FLAGS.train_summary_interval = -1
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=True,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-dist-strat')
def benchmark_perf_mwms_1x8_gpu_fp16_seq128_200_steps(self):
"""Test bert pretraining with 1x8 GPU for 200 steps."""
self._setup()
self._specify_common_flags()
self._specify_gpu_mwms_flags()
FLAGS.num_steps_per_epoch = 200
FLAGS.num_train_epochs = 1
FLAGS.train_batch_size = 96 * 1
FLAGS.steps_per_loop = 100
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_mwms_1x8_gpu_fp16_seq128_200_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-dist-strat')
def benchmark_perf_mwms_2x8_gpu_fp16_seq128_200_steps(self):
"""Test bert pretraining with 2x8 GPU for 200 steps."""
self._setup()
self._specify_common_flags()
self._specify_gpu_mwms_flags()
FLAGS.num_steps_per_epoch = 200
FLAGS.num_train_epochs = 1
FLAGS.train_batch_size = 96 * 2
FLAGS.steps_per_loop = 100
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_mwms_2x8_gpu_fp16_seq128_200_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
@owner_utils.Owner('tf-dist-strat')
def benchmark_perf_mwms_8x8_gpu_fp16_seq128_200_steps(self):
"""Test bert pretraining with 8x8 GPU for 200 steps."""
self._setup()
self._specify_common_flags()
self._specify_gpu_mwms_flags()
FLAGS.num_steps_per_epoch = 200
FLAGS.num_train_epochs = 1
FLAGS.train_batch_size = 96*8
FLAGS.steps_per_loop = 100
FLAGS.model_dir = self._get_model_dir(
'benchmark_perf_mwms_8x8_gpu_fp16_seq128_200_steps')
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
# Disable accuracy check.
self._run_and_report_benchmark(
summary_path=summary_path,
report_accuracy=False,
ds_type=FLAGS.distribution_strategy)
if __name__ == '__main__':
tf.test.main()
| [
2,
406,
600,
355,
25,
21015,
18,
198,
2,
15069,
12131,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743... | 2.334183 | 7,059 |
import matplotlib
import importlib
from hydroDL import kPath, utils
from hydroDL.app import waterQuality
from hydroDL.master import basins
from hydroDL.data import usgs, gageII, gridMET, ntn
from hydroDL.master import slurm
from hydroDL.post import axplot, figplot
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import json
import scipy
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:
dictSite = json.load(f)
codeLst = sorted(usgs.newC)
ep = 500
reTest = False
dataName = 'rbWN5'
siteNoLst = dictSite['comb']
nSite = len(siteNoLst)
# load all sequence
dictLSTMLst = list()
# LSTM
labelLst = ['QFP_C']
for label in labelLst:
dictLSTM = dict()
trainSet = 'comb-B10'
outName = '{}-{}-{}-{}'.format(dataName, 'comb', label, trainSet)
for k, siteNo in enumerate(siteNoLst):
print('\t site {}/{}'.format(k, len(siteNoLst)), end='\r')
df = basins.loadSeq(outName, siteNo)
dictLSTM[siteNo] = df
dictLSTMLst.append(dictLSTM)
# WRTDS
dictWRTDS = dict()
dirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'Linear-W', 'B20', 'output')
for k, siteNo in enumerate(siteNoLst):
print('\t site {}/{}'.format(k, len(siteNoLst)), end='\r')
saveFile = os.path.join(dirWRTDS, siteNo)
df = pd.read_csv(saveFile, index_col=None).set_index('date')
# df = utils.time.datePdf(df)
dictWRTDS[siteNo] = df
# Observation
dictObs = dict()
for k, siteNo in enumerate(siteNoLst):
print('\t site {}/{}'.format(k, len(siteNoLst)), end='\r')
df = waterQuality.readSiteTS(siteNo, varLst=codeLst, freq='W')
dictObs[siteNo] = df
# calculate correlation
tt = np.datetime64('2010-01-01')
ind1 = np.where(df.index.values < tt)[0]
ind2 = np.where(df.index.values >= tt)[0]
dictLSTM = dictLSTMLst[1]
dictLSTM2 = dictLSTMLst[0]
corrMat = np.full([len(siteNoLst), len(codeLst), 4], np.nan)
rmseMat = np.full([len(siteNoLst), len(codeLst), 4], np.nan)
for ic, code in enumerate(codeLst):
for siteNo in dictSite[code]:
indS = siteNoLst.index(siteNo)
v1 = dictLSTM[siteNo][code].iloc[ind2].values
v2 = dictWRTDS[siteNo][code].iloc[ind2].values
v3 = dictObs[siteNo][code].iloc[ind2].values
v4 = dictLSTM2[siteNo][code].iloc[ind2].values
[v1, v2, v3, v4], ind = utils.rmNan([v1, v2, v3, v4])
rmse1, corr1 = utils.stat.calErr(v1, v2, rmExt=False)
rmse2, corr2 = utils.stat.calErr(v1, v3, rmExt=False)
rmse3, corr3 = utils.stat.calErr(v2, v3, rmExt=False)
rmse4, corr4 = utils.stat.calErr(v4, v3, rmExt=False)
corrMat[indS, ic, 0] = corr1
corrMat[indS, ic, 1] = corr2
corrMat[indS, ic, 2] = corr3
corrMat[indS, ic, 3] = corr4
matplotlib.rcParams.update({'font.size': 12})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
# plot box
labLst1 = [usgs.codePdf.loc[code]['shortName'] +
'\n'+code for code in codeLst]
labLst2 = ['LSTM vs WRTDS', 'LSTM vs Obs', 'WRTDS vs Obs']
dataBox = list()
for k in range(len(codeLst)):
code = codeLst[k]
temp = list()
for i in [0, 1, 2]:
temp.append(corrMat[:, k, i])
dataBox.append(temp)
fig = figplot.boxPlot(dataBox, label1=labLst1, widths=0.5, cLst='grb',
label2=labLst2, figsize=(20, 5), yRange=[0, 1])
fig.show()
# plot 121
importlib.reload(axplot)
codeLst2 = ['00095', '00400', '00405', '00600', '00605',
'00618', '00660', '00665', '00681', '00915',
'00925', '00930', '00935', '00940', '00945',
'00950', '00955', '70303', '71846', '80154']
fig, axes = plt.subplots(5, 4)
ticks = [-0.5, 0, 0.5, 1]
for k, code in enumerate(codeLst2):
j, i = utils.index2d(k, 5, 4)
ax = axes[j, i]
ind = codeLst.index(code)
x = corrMat[:, ind, 1]
y = corrMat[:, ind, 2]
c = corrMat[:, ind, 0]
out = axplot.scatter121(ax, x, y, c)
rmse, corr = utils.stat.calErr(x, y)
titleStr = '{} {} {:.2f}'.format(
code, usgs.codePdf.loc[code]['shortName'], corr)
_ = ax.set_xlim([ticks[0], ticks[-1]])
_ = ax.set_ylim([ticks[0], ticks[-1]])
_ = ax.set_yticks(ticks[1:])
_ = ax.set_xticks(ticks[1:])
axplot.titleInner(ax, titleStr)
# print(i, j)
if i != 0:
_ = ax.set_yticklabels([])
if j != 4:
_ = ax.set_xticklabels([])
# _ = ax.set_aspect('equal')
# plt.subplots_adjust(wspace=0, hspace=0)
# fig.colorbar(out, ax=ax)
fig.show()
fig, ax = plt.subplots(1, 1)
code = '00095'
ind = codeLst.index(code)
x = corrMat[:, ind, 1]
y = corrMat[:, ind, 2]
c = corrMat[:, ind, 0]
out = axplot.scatter121(ax, x, y, c)
fig.colorbar(out, ax=ax)
fig.show()
# 121 LSTM inputs
importlib.reload(axplot)
codeLst2 = ['00095', '00400', '00405', '00600', '00605',
'00618', '00660', '00665', '00681', '00915',
'00925', '00930', '00935', '00940', '00945',
'00950', '00955', '70303', '71846', '80154']
fig, axes = plt.subplots(5, 4)
yticks = [-0.5, 0, 0.5, 1]
xticks = [-0.5, 0, 0.5, 1]
for k, code in enumerate(codeLst2):
j, i = utils.index2d(k, 5, 4)
ax = axes[j, i]
ind = codeLst.index(code)
y = corrMat[:, ind, 1]
x = corrMat[:, ind, 3]
# c = np.argsort(countMat2[:, ind])
axplot.plot121(ax, x, y)
rmse, corr = utils.stat.calErr(x, y, rmExt=False)
titleStr = '{} {} {:.2f}'.format(
code, usgs.codePdf.loc[code]['shortName'], corr)
axplot.titleInner(ax, titleStr)
_ = ax.set_xlim([xticks[0], xticks[-1]])
_ = ax.set_ylim([yticks[0], yticks[-1]])
_ = ax.set_xticks(xticks[1:])
_ = ax.set_yticks(yticks[1:])
# print(i, j)
if i != 0:
_ = ax.set_yticklabels([])
if j != 4:
_ = ax.set_xticklabels([])
# _ = ax.set_aspect('equal')
plt.subplots_adjust(wspace=0, hspace=0)
# fig.colorbar()
fig.show()
| [
198,
11748,
2603,
29487,
8019,
198,
11748,
1330,
8019,
198,
6738,
17173,
19260,
1330,
479,
15235,
11,
3384,
4487,
198,
6738,
17173,
19260,
13,
1324,
1330,
1660,
35013,
198,
6738,
17173,
19260,
13,
9866,
1330,
1615,
1040,
198,
6738,
17173,... | 2.030158 | 2,918 |
"""________________readme________________"""
# source code for Private Function in Python
# how to use it is given in the docstring portion of the PrivateFunc class
# please report any bug
# first release: April 6, 2021
# latest update release: April 8, 2021
# version: 1.2.21
"""________________readme________________"""
""" Copyright (C) 2021 Md. Faheem Hossain fmhossain2941@gmail.com"""
""" Permission is hereby granted, free of charge, to any person obtaining a copy of this code, to
deal in the code without restriction, including without limitation the rights to use, copy, publish,
distribute, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Code.
THE CODE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
CODE OR THE USE OR OTHER DEALINGS IN THE CODE."""
class PrivateFunc:
""" >> description: this is a class which one can use to create private functions
>> public functions: private
>> how to use:
# first of all import this private.py file, then create an object of this class. Before private
functions use the
'private' method as a decorator
>> sample code: (sample.py) __________
from private import PrivateFunc
privatefunc = PrivateFunc('sample') # enter the current module name
# or privatefunc = PrivateFunc('sample', error_name = ImportError, error_message = "false import")
# error_name is the name of the error which is raised when private function is illegally called
# and error_message is the message which will be shown with the error
# either enter both error_name and error_message, or none
@privatefunc.private
def a():
return 10
# now 'a' is a private function"""
__slots__ = ["_filename", "__error_name", "__error_message"]
__version__ = '1.2.21'
_filename: str
__error_name: type
__error_message: str
def private(self, func):
"""this is the core function of the class"""
return wrap
"""Obs.: 1. The source code has syntax highlighting. In case you find it hard to read, as I wrote it in my
PC, so I'll also recommend you to use a wider screen. 2. Since SL only allows a single page for a project,
so I had to find a way to combine 3 python files into 1. Let me say a bit about the demo code: the name of
those 3 files: a) private.py (source code) b) moduleWithPrivateFunction.py (a file with private functions)
c) file0.py (the file provided by SL; it calls the functions from the 2nd file and tests if they are
working properly or not). Please copy line 9-48, 53-61 & 81-111 in 3 different files (as recommended above)
and then read them. """ | [
37811,
4841,
961,
1326,
4841,
37811,
198,
2,
2723,
2438,
329,
15348,
15553,
287,
11361,
198,
2,
703,
284,
779,
340,
318,
1813,
287,
262,
2205,
8841,
6903,
286,
262,
15348,
37,
19524,
1398,
198,
2,
3387,
989,
597,
5434,
198,
2,
717,
... | 3.324296 | 959 |
import django
from django.core.handlers.wsgi import WSGIHandler
def get_wsgi_application():
"""
The public interface to Django's WSGI support. Return a WSGI callable.
Avoids making django.core.handlers.WSGIHandler a public API, in case the
internal WSGI implementation changes or moves in the future.
"""
django.setup(set_prefix=False)
return WSGIHandler()
| [
11748,
42625,
14208,
201,
198,
6738,
42625,
14208,
13,
7295,
13,
4993,
8116,
13,
18504,
12397,
1330,
25290,
18878,
25060,
201,
198,
201,
198,
201,
198,
4299,
651,
62,
18504,
12397,
62,
31438,
33529,
201,
198,
220,
220,
220,
37227,
201,
... | 2.864286 | 140 |
'''
@Krishna Somandepalli - July 2017
Train a simple deep VGG-style CNN to predict race from face.
The race databases were constructed from here: https://docs.google.com/spreadsheets/d/16XkCRkipjKMGVZ1GQXG3ZOgUgtcYewbfIYgezlmM9Gc/edit#gid=0
5 race classes: caucasian, african, eastasian, asianindian, latino (nativeamerican/pacificis ignored due to lack of data)
split data into train and test manually - make sure test data has unique identities not seen in training.
Test has balanced class data; Train has highly imbalanced class data
NOTE on DATA
The data cannot be released since some of the image databases required signing a data release document
You can recreate the database from the above google document
The race labels acquired from movie characters has been updated here by identity in case if one wants to use it for CASIA or such databases
The preprocessing scripts have been updated
'''
from __future__ import print_function
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import BatchNormalization, Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import metrics
from keras.callbacks import TensorBoard
import random
import json
import numpy as np
from itertools import groupby, islice, cycle
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from PIL import Image
from keras.callbacks import CSVLogger
import tensorflow as tf
#function to read a list of image files and return an array for training/testing
def ImLoad(file_list):
    """Load grayscale image files into a float array of shape (N, H, W, 1).

    Each file is opened with PIL, scaled from [0, 255] down to [0, 1], and a
    trailing channel axis is appended (tensorflow image format).
    """
    scaled = [np.asarray(Image.open(path)) * (1. / 255.0) for path in file_list]
    return np.asarray(scaled)[..., np.newaxis]
#tensorflow image format - standard VGG-16 with modifications for grayscale images
def generate_vgg16_conf1(num_classes, in_shape = (100, 100, 1)):
    """Build a slimmed-down VGG16-style Sequential CNN.

    Four conv/pool blocks (32/64/128/256 filters; the last two blocks have
    three conv layers each), followed by two 512-unit fully connected layers
    with dropout and a softmax output over `num_classes`.
    """
    model = Sequential()
    # (block index, filter count, number of conv layers in the block)
    conv_blocks = [(1, 32, 2), (2, 64, 2), (3, 128, 3), (4, 256, 3)]
    for block_idx, filters, n_convs in conv_blocks:
        for conv_idx in range(1, n_convs + 1):
            layer_kwargs = {
                'activation': 'relu',
                'padding': 'same',
                'name': 'block%d_conv%d' % (block_idx, conv_idx),
            }
            if block_idx == 1 and conv_idx == 1:
                # Only the very first layer declares the input shape.
                layer_kwargs['input_shape'] = in_shape
            model.add(Conv2D(filters, (3, 3), **layer_kwargs))
        model.add(MaxPooling2D((2, 2), strides=(2, 2),
                               name='block%d_pool' % block_idx))
    # Classification head
    model.add(Flatten(name='flatten'))
    model.add(Dense(512, activation='relu', name='fc1'))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu', name='fc2'))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax', name='predictions'))
    return model
def generate_vgg16(num_classes, in_shape = (100, 100, 1)):
    """Build a slimmed-down VGG16-style CNN with BatchNorm and Dropout.

    Same four-block layout as `generate_vgg16_conf1` but every conv layer is
    followed by BatchNormalization -> ReLU -> Dropout(0.2), the dense head is
    1024 units wide, and the output activation is sigmoid.
    """
    with tf.device('/cpu:0'):
        model = Sequential()

    def add_conv_unit(filters, layer_name, **first_layer_kwargs):
        # Conv -> BatchNorm -> ReLU -> Dropout: the repeated unit of every block.
        model.add(Conv2D(filters, (3, 3), padding='same', name=layer_name,
                         **first_layer_kwargs))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.2))

    def add_dense_unit(units, layer_name):
        # Dense -> BatchNorm -> ReLU -> Dropout for the classification head.
        model.add(Dense(units, name=layer_name))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.2))

    # (block index, filter count, number of conv layers in the block)
    conv_blocks = [(1, 32, 2), (2, 64, 2), (3, 128, 3), (4, 256, 3)]
    for block_idx, filters, n_convs in conv_blocks:
        for conv_idx in range(1, n_convs + 1):
            if block_idx == 1 and conv_idx == 1:
                # Only the very first layer declares the input shape.
                add_conv_unit(filters, 'block1_conv1', input_shape=in_shape)
            else:
                add_conv_unit(filters, 'block%d_conv%d' % (block_idx, conv_idx))
        model.add(MaxPooling2D((2, 2), strides=(2, 2),
                               name='block%d_pool' % block_idx))

    # Classification head
    model.add(Flatten(name='flatten'))
    add_dense_unit(1024, 'fc1')
    add_dense_unit(1024, 'fc2')
    # NOTE(review): sigmoid (not softmax) output, exactly as in the original
    # implementation -- presumably intended for multi-label style training.
    model.add(Dense(num_classes, activation='sigmoid', name='predictions'))
    return model
classes = ['african', 'asianindian', 'caucasian', 'eastasian', 'latino']#, 'nativeamerican']
num_classes = len(classes) #5
#load race labels from a dictionary of following format:
#{"latino":[im1.jpg, im2.jpg,....], "caucasian":[/path/to/image1.jpg, /path/to/imag2/jpg]}
all_images = json.load(open('SSD_ALL_im_race_dict.json', 'r'))
# label encoder for one-hot encoding
labeler = LabelEncoder()
labeler.fit(classes)
# reading and preparing test images - test images selected to keep identities unseen from training
all_test_images = [i.strip() for i in open('all_test_images.txt', 'r').readlines()]
all_test_labels = [i.strip() for i in open('all_test_labels.txt', 'r').readlines()]
all_test_ = zip(all_test_images, all_test_labels)
all_test = [i for i in all_test_ if i[1] in classes]
[random.shuffle(all_images[k]) for k in classes]
test_labels_int = labeler.transform([i[1] for i in all_test])
test_labels = np_utils.to_categorical(test_labels_int)
test_images = ImLoad([i[0] for i in all_test])
# num test images per class
N_test = 100
num_images_per_class = [len(all_images[k]) for k in classes]
# batch generator helper
#classes_rcycle = [rcycle( random.sample( sorted(all_images[k])[::-1][N_test:], \
# len(all_images[k][N_test:]) ) ) for k in classes]
batch_size_per_class = 10
# number of images per class to subsample
min_class_size = min(num_images_per_class) - N_test
#num_batches_per_ep = (min_class_size - N_test)/batch_size_per_class
#num_epochs = (max(num_images_per_class) - N_test)/(min_class_size - N_test)
# randomly sample from all classes to the min_class_size + shuffle
ALL_IMAGES = [ random.sample([i for i in all_images[k] if i not in all_test_images], min_class_size) for k in classes]
[random.shuffle(i) for i in ALL_IMAGES]
#classes_rcycle = [rcycle( random.sample( [i for i in all_images[k] if i not in all_test_images], min_class_size ) + ) \
# for k in classes]
print('preparing the image batch generator ----')
classes_rcycle = [rcycle(i) for i in ALL_IMAGES]
print("DONE LOADING IMAGES - - -- - - - - - - - - - - - - - - ")
# The efforts taken here to write a batch generator are to ensure class balance in each batch!
#im_labels are fixed for each batch, so we neednot redo this in the tarinig loop
im_labels_ = []
for cl in classes:
im_labels_ += [cl for _ in range(batch_size_per_class)]
#encoded labels
labels_encoded = labeler.transform(im_labels_)
#one hot encoded
im_labels = np_utils.to_categorical(labels_encoded)
# fn. to get class-wise performance
# RUN THE CNN MODEL
# with tf.device('/cpu:0'):
if True:
# model load arch
model = generate_vgg16_conf1(num_classes=num_classes)
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr = 0.00001, decay = 1e-6)
#opt = keras.optimizers.Adam()
# compile the model
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
train_generator = image_batch_generator()
num_epochs = 30
csv_logger = CSVLogger('log_multiclass_conf1_5class_09_12_2017.csv', append=True, separator=';')
model_checkpoint = keras.callbacks.ModelCheckpoint("multiclass_conf1_5class.{epoch:02d}.hdf5", \
monitor='val_loss', verbose=1, save_best_only=False, save_weights_only=False, mode='auto', period=1)
model.fit_generator(
train_generator,
steps_per_epoch = 3500,
epochs=num_epochs,
validation_data= (test_images, test_labels),
validation_steps = 10,
callbacks = [csv_logger, model_checkpoint])
model.save('multiclass_conf1_5class_%dep_09_12_2017.h5' % (num_epochs))
# num_batches_per_ep = 1500
# ### TRAINING LOOP
# for e in range(num_epochs):
# print('Epoch', e)
# for b in range(num_batches_per_ep):
# im_array, im_labels = train_generator.next()
# if not b%100: print(b)
# # resume training
# # just before ending training for this epoch show accuracies, etc
# if b == num_batches_per_ep-1:
# model.fit(im_array, im_labels, batch_size=250, epochs=1, verbose=2,\
# shuffle=True, callbacks = [ csv_logger ])
# else:
# model.fit(im_array, im_labels, batch_size=250, epochs=1, verbose=0, \
# shuffle=True)
# # predict ans save model for the last batch for this epoch - until then train
# ## TESTING SUBLOOP
# if b == num_batches_per_ep-1:
# # pred_labels = model.predict(test_images, batch_size=250, verbose=1)
# pred_prob = model.predict_proba(test_images, batch_size=250, verbose=1)
# print( 'val acc = ', get_val_accuracy(pred_prob) )
# model.save('multilabel_subsample_racenet_all_ims_ep%d.h5' % (e))
# np.savez('pred_info_4_ep%d' % (e), \
# true_labels = test_labels_, pred_prob = pred_prob, \
# im_list = test_image_list)
| [
7061,
6,
198,
31,
42,
37518,
2616,
9995,
392,
538,
36546,
532,
2901,
2177,
198,
198,
44077,
257,
2829,
2769,
569,
11190,
12,
7635,
8100,
284,
4331,
3234,
422,
1986,
13,
198,
464,
3234,
20083,
547,
12006,
422,
994,
25,
3740,
1378,
31... | 2.457586 | 4,574 |
import abc
import functools
import logging
import re
import subprocess
try:
import magic
except ImportError:
magic = None
import html.parser
from bs4 import BeautifulSoup
import lxml.etree as etree
logger = logging.getLogger(__name__)
@functools.total_ordering
class PlainText(DocumentParser):
"""
Possibly show a small plain text object. If the text is too long to be
shown, handling is passed on to different plugins.
"""
| [
11748,
450,
66,
198,
11748,
1257,
310,
10141,
198,
11748,
18931,
198,
11748,
302,
198,
11748,
850,
14681,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
5536,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
5536,
796,
6045,
198,
198... | 3.130137 | 146 |
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Provides basic mocks of core storage service classes, for unit testing:
ACL, Key, Bucket, Connection, and StorageUri. We implement a subset of
the interfaces defined in the real boto classes, but don't handle most
of the optional params (which we indicate with the constant "NOT_IMPL").
"""
import copy
import boto
import base64
import re
from hashlib import md5
from boto.utils import compute_md5
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
from boto.s3.prefix import Prefix
from boto.compat import six
NOT_IMPL = None
# We only mock a single provider/connection.
mock_connection = MockConnection()
| [
2,
15069,
3050,
3012,
3457,
13,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
2,
4866,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
198,
2,
366,
25423,
12340,
284,
17... | 3.695096 | 469 |
import sys, fmdata

"""
Simple script to extract a 'test' data set from a real '.dat' file.
Usage: python <script> input.dat > subset.dat
"""

# First pass: parse the .dat file only to learn the grid dimensions (nx, ny, nz).
f = open(sys.argv[1], "r")
d = fmdata.readFromDatFile(f)
f.close()

# Pick every 2nd x, every 2nd y and every 6th z grid point, converting each
# (x, y, z) index triple into its flat (row-major) data-line number.
linenums = []
for x in xrange(0, d.nx, 2):
    for y in xrange(0, d.ny, 2):
        for z in xrange(0, d.nz, 6):
            linenums.append(x*d.ny*d.nz + y*d.nz + z)
# Sorted so the second pass can stream through the file in a single sweep.
linenums.sort()

# Second pass: copy the two header lines verbatim, then echo only the
# selected data lines to stdout.
f = open(sys.argv[1], "r")
sys.stdout.write(f.readline())
sys.stdout.write(f.readline())
# `current` is the 0-based index of the last data line read (header excluded).
current = -1
for linenum in linenums:
    while current != linenum:
        line = f.readline()
        current += 1
    sys.stdout.write(line)
f.close()
| [
198,
11748,
25064,
11,
277,
76,
7890,
198,
198,
37811,
198,
26437,
4226,
284,
7925,
257,
705,
9288,
6,
1366,
900,
422,
257,
1103,
45302,
19608,
6,
2393,
13,
198,
37811,
198,
198,
69,
796,
1280,
7,
17597,
13,
853,
85,
58,
16,
4357,... | 2.098592 | 284 |
from __future__ import unicode_literals
import copy
from . import models as conference_models
class ConferenceTestingMixin(object):
    """
    This is a simple mixin that provides helper methods for initializing
    fully setup conference objects and related models like SessionKinds.
    """
    # NOTE(review): class-level mutable set -- shared by every TestCase using
    # this mixin; relies on destroy_* being called to stay clean.
    _registered_conference_setups = set()

    def create_test_conference(self, prefix=None):
        """
        Creates testcase local conference, session kind, track, ... variables
        with the given prefix.
        """
        if prefix in self._registered_conference_setups:
            raise RuntimeError(u"Conference with prefix {0} already set up!"
                               .format(prefix))
        if prefix is None:
            prefix = u""
        conference = conference_models.Conference(title="TestCon")
        conference.save()
        audience_level = conference_models.AudienceLevel(
            level=1, name='Level 1', conference=conference)
        audience_level.save()
        kind = conference_models.SessionKind(
            conference=conference, closed=False, slug='kind')
        kind.save()
        duration = conference_models.SessionDuration(
            minutes=30, conference=conference)
        duration.save()
        track = conference_models.Track(
            name="NAME", slug="SLUG", conference=conference)
        track.save()
        # Expose everything as "<prefix><name>" attributes on the testcase.
        created = {
            'conference': conference,
            'audience_level': audience_level,
            'kind': kind,
            'duration': duration,
            'track': track,
        }
        for attr_name, obj in created.items():
            setattr(self, "{0}{1}".format(prefix, attr_name), obj)
        self._registered_conference_setups.add(prefix)

    def destroy_test_conference(self, prefix):
        """
        Removes the conference set with the given prefix from the current
        testcase instance.
        """
        if prefix not in self._registered_conference_setups:
            raise RuntimeError("Conference with prefix {0} doesn't exist!"
                               .format(prefix))
        conference = getattr(self, "{0}conference".format(prefix))
        # Proposals reference the conference; clear them first if present.
        if hasattr(conference, 'proposal_set'):
            conference.proposal_set.all().delete()
        conference.delete()
        for attr_name in ('audience_level', 'kind', 'duration', 'track'):
            getattr(self, "{0}{1}".format(prefix, attr_name)).delete()
        self._registered_conference_setups.remove(prefix)

    def destroy_all_test_conferences(self):
        """
        Removes all known conference sets from the current testcase instance.
        """
        # Iterate over a copy: destroy_test_conference mutates the set.
        for prefix in copy.copy(self._registered_conference_setups):
            self.destroy_test_conference(prefix)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
4866,
198,
198,
6738,
764,
1330,
4981,
355,
4495,
62,
27530,
628,
198,
4871,
8785,
44154,
35608,
259,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
22... | 2.465214 | 1,193 |
import configparser
def config(section, file='database.ini'):
    """Parse an INI file and return the settings of one section as a dict.

    Args:
        section (str): name of the section in the configuration INI file
        file (str): file name of the INI file

    Returns:
        dict: option-name -> value mapping for the requested section

    Raises:
        Exception: if the file cannot be read, or the section is missing
    """
    configuration = configparser.ConfigParser()
    # ConfigParser.read() silently ignores missing/unreadable files and
    # returns the list of files it actually parsed -- check it so a bad path
    # fails loudly here instead of surfacing as a misleading
    # "section not found" error below.
    read_files = configuration.read(file)
    if not read_files:
        raise Exception('could not read configuration file {0}'.format(file))
    if not configuration.has_section(section):
        raise Exception('{0} not found in the {1} file'.format(section, file))
    # items(section) yields (option, value) pairs for the section.
    return dict(configuration.items(section))
11748,
4566,
48610,
198,
198,
4299,
4566,
7,
5458,
11,
2393,
11639,
48806,
13,
5362,
6,
2599,
198,
220,
220,
220,
37227,
79,
945,
274,
832,
257,
2393,
290,
5860,
8398,
6460,
329,
257,
1813,
2665,
287,
281,
3268,
40,
2393,
198,
220,
... | 2.757895 | 285 |
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Unlicense OR CC0-1.0
import logging
import os
import pytest
from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32c3
@pytest.mark.esp32s2
@pytest.mark.esp32s3
@pytest.mark.wifi
def test_examples_protocol_https_x509_bundle(dut: Dut) -> None:
    """
    steps: |
      1. join AP
      2. connect to multiple URLs
      3. send http request
    """
    # Log the size of the built firmware binary before exercising it.
    app_binary = os.path.join(dut.app.binary_path, 'https_x509_bundle.bin')
    app_binary_size = os.path.getsize(app_binary)
    logging.info('https_x509_bundle_bin_size : {}KB'.format(app_binary_size // 1024))
    # Parse how many URLs the firmware announces, then wait for each
    # connection and the final completion message.
    url_count = int(dut.expect(r'Connecting to (\d+) URLs', timeout=30)[1].decode())
    dut.expect(r'Connection established to ([\s\S]*)', timeout=30)
    dut.expect('Completed {} connections'.format(url_count), timeout=60)
@pytest.mark.esp32
@pytest.mark.esp32c3
@pytest.mark.esp32s2
@pytest.mark.esp32s3
@pytest.mark.wifi
@pytest.mark.parametrize('config', ['ssldyn',], indirect=True)
| [
2,
30628,
55,
12,
8979,
15269,
8206,
25,
33160,
20386,
601,
361,
11998,
357,
2484,
272,
20380,
8,
7375,
42513,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
791,
43085,
6375,
12624,
15,
12,
16,
13,
15,
198,
11748,
18931,
198,
... | 2.399123 | 456 |
from fastapi import APIRouter, Depends, HTTPException, Body
from fastapi.security import OAuth2PasswordRequestForm
from starlette.status import HTTP_400_BAD_REQUEST
# custom defined
from app.models.user import UserCreate, User, TokenResponse, UserListResponse, UserCreateRequest, RolePatchRequest, \
RoleCreateModel, GroupEnum, CaseTypeEnum, DivisionCreateModel, RoleWithDivisionModel
from app.crud.user import create_user, get_user, get_user_list_by_query_with_page_and_limit, count_user_by_query, \
get_user_by_name, update_role_with_item, create_role_with_item, delete_group_by_query, \
get_one_group_by_query, get_one_user_by_query, create_division_with_item, get_one_division_by_query, \
update_division_by_query_with_item, get_group_list, get_division_list_unfold_user_by_query, delete_user_by_query, \
delete_division_by_query, update_user_info_by_query_with_item
from app.dependencies.jwt import get_current_user_authorizer
from app.utils.jwt import create_access_token
from app.db.mongodb import AsyncIOMotorClient, get_database
from app.core.config import api_key as API_KEY
from app.utils.security import generate_salt, get_password_hash, verify_password
router = APIRouter()
@router.post("/users/login", response_model=TokenResponse, tags=["user"], name='账号密码登录')
@router.post('/user', tags=['admin'], name='单个用户添加')
@router.get('/user_list', tags=['admin'], response_model=UserListResponse, name='用户列表获取')
@router.get('/user/me', tags=['user'], name='用户个人信息')
@router.post("/users/init", tags=["user"], name='初始化管理员')
@router.delete('/user', tags=['admin'], name='删除用户')
@router.patch('/user', tags=['user'], name='用户修改信息')
@router.patch('/user/password', tags=['user'], name='用户修改密码')
@router.patch('/group', tags=['admin'], name='修改角色分工')
@router.post('/group', tags=['admin'], name='新增分组')
@router.get('/group', tags=['user', 'admin'], name='获取角色分组分工信息')
@router.delete('/group', tags=['admin'], name='删除分组')
@router.post('/division', tags=['admin'], name='新增角色分工')
@router.patch('/division', tags=['admin'], name='修改角色分工')
@router.delete('/division', tags=['admin'], name='删除角色分工')
| [
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
2129,
2412,
11,
14626,
16922,
11,
12290,
198,
6738,
3049,
15042,
13,
12961,
1330,
440,
30515,
17,
35215,
18453,
8479,
198,
6738,
3491,
21348,
13,
13376,
1330,
14626,
62,
7029,
62,
33,
2885... | 2.309345 | 931 |
"""
You should not make an instance of the Client class yourself, rather you should listen for new connections with
:meth:`~websocket.server.WebSocketServer.connection`
>>> @socket.connection
>>> async def on_connection(client: Client):
... # Here you can use the client, register callbacks on it or send it messages
... await client.writer.ping()
"""
import asyncio
import logging
import time
from .enums import DataType, State
from .reasons import Reasons, Reason
from .stream.reader import WebSocketReader
from .stream.writer import WebSocketWriter
logger = logging.getLogger(__name__)
class Client:
"""
:ivar addr: IPv4 or IPv6 address of the client.
:type addr: str
:ivar port: The port the client opened it's socket on.
:type port: int
:ivar writer: The writer used for writing frames to the client.
:type writer: WebSocketWriter
"""
def message(self, fn):
"""Decorator for registering the on_message callback.
:param fn: The callback to register.
The callback should be async and take one parameter, a :class:`~websocket.stream.reader.WebSocketReader`
This callback is called when the server receives an valid data frame,
if an exception occurs after the first valid frame e.g. if an text frame
contains invalid utf-8, or if it's an invalid fragmented message, then we
send the exception to the reader with :meth:`~websocket.stream.buffer.Buffer.set_exception`.
>>> @client.message
>>> async def on_message(reader: WebSocketReader):
... print("Got message " + await reader.get())
"""
self.on_message = fn
def ping(self, fn):
"""Decorator for registering the on_ping callback.
:param fn: The callback to register.
If you set this callback you will override the default behaviour of sending pongs back to the client when
receiving pings. If you want to keep this behaviour call :meth:`~websocket.stream.writer.WebSocketWriter.pong`.
The callback should be async and take two parameters, :class:`bytes` payload, and :class:`int` length.
This callback is called when we receive a valid ping from the client.
>>> @client.ping
>>> async def on_ping(payload: bytes, length: int):
... print("Received ping from client")
... await self.writer.pong(length, payload)
"""
self.on_ping = fn
def pong(self, fn):
"""Decorator for registering the on_pong callback.
:param fn: The callback to register.
The callback should be async and take two parameters, :class:`bytes` payload, and :class:`int` length
This callback is called when we receive a valid pong from the client.
>>> @client.pong
>>> async def on_pong(payload: bytes, length: int):
... print("Received pong from client")
"""
self.on_pong = fn
def closed(self, fn):
"""Decorator for registering the on_closed callback.
:param fn: The callback to register.
The callback should be async and take two parameters, :class:`bytes` code of length 2, and :class:`str` reason.
This callback is called when the connection this this client is closing.
>>> @client.closed
>>> async def on_closed(code: bytes, reason: str):
... print("Connection with client is closing for " + reason)
"""
self.on_closed = fn
@staticmethod
@staticmethod
HANDLERS = {opcode: Client.handle_undefined for opcode in range(0, 1 << 4)}
HANDLERS.update({
DataType.CONTINUATION.value: Client.handle_continuation,
DataType.TEXT.value: Client.handle_data(DataType.TEXT),
DataType.BINARY.value: Client.handle_data(DataType.BINARY),
DataType.CLOSE.value: Client.handle_close,
DataType.PING.value: Client.handle_ping_or_pong(DataType.PING),
DataType.PONG.value: Client.handle_ping_or_pong(DataType.PONG),
})
| [
37811,
198,
1639,
815,
407,
787,
281,
4554,
286,
262,
20985,
1398,
3511,
11,
2138,
345,
815,
6004,
329,
649,
8787,
351,
220,
198,
25,
76,
2788,
25,
63,
93,
732,
1443,
5459,
13,
15388,
13,
13908,
39105,
10697,
13,
38659,
63,
198,
1... | 2.694993 | 1,518 |
import json
import os
import re
from os.path import relpath
from time import sleep
from deployment_helpers.aws.iam import create_s3_access_credentials
from deployment_helpers.aws.rds import get_full_db_credentials
from deployment_helpers.aws.s3 import create_data_bucket
from deployment_helpers.constants import (AWS_CREDENTIALS_FILE, get_global_config,
GLOBAL_CONFIGURATION_FILE, get_aws_credentials,
VALIDATE_GLOBAL_CONFIGURATION_MESSAGE, VALIDATE_AWS_CREDENTIALS_MESSAGE,
get_pushed_full_processing_server_env_file_path, get_beiwe_environment_variables,
get_beiwe_python_environment_variables_file_path, get_finalized_credentials_file_path,
get_finalized_environment_variables, GLOBAL_CONFIGURATION_FILE_KEYS, AWS_CREDENTIALS_FILE_KEYS)
from deployment_helpers.general_utils import log, random_alphanumeric_string, EXIT
PUBLIC_DSN_REGEX = re.compile('^https://[\S]+@sentry\.io/[\S]+$')
PRIVATE_DSN_REGEX = re.compile('^https://[\S]+:[\S]+@sentry\.io/[\S]+$')
####################################################################################################
################################### Reference Configs ##############################################
####################################################################################################
####################################################################################################
################################### Reference Configs ##############################################
####################################################################################################
def _simple_validate_required(getter_func, file_path, appropriate_keys, display_name):
    """ returns False if invalid, True if valid. For use with fully required keys, prints useful messages."""
    # try and load, fail usefully.
    try:
        json_config = getter_func()
    except Exception:
        log.error("could not load the %s file '%s'." % (display_name, file_path))
        sleep(0.1)
        return False  # could not load, did not pass

    # check for invalid values and keyserrors
    error_free = True
    # .items() instead of the Python-2-only .iteritems() so this module also
    # runs under Python 3 (identical behaviour on Python 2).
    for k, v in json_config.items():
        if k not in appropriate_keys:
            log.error("a key '%s' is present in %s, but was not expected." % (k, display_name))
            error_free = False
        if not v:
            error_free = False
            log.error("'%s' must be present in %s and have a value." % (k, display_name))
    # every required key must be present (missing keys are reported separately
    # from empty values above).
    for key in appropriate_keys:
        if key not in json_config:
            log.error("the key '%s' was expected in %s but not present." % (key, display_name))
            error_free = False
    sleep(0.1)  # python logging is dumb, wait so logs actually appear
    return error_free
def ensure_nonempty_string(value, value_name, errors_list, subject):
    """
    Checks that an inputted value is a nonempty string
    :param value: A value to be checked
    :param value_name: The name of the value, to be used in the error string
    :param errors_list: The pass-by-reference list of error strings which we append to
    :param subject: Label identifying what is being validated, prefixed to each error
    :return: Whether or not the value is in fact a nonempty string
    """
    # (str, unicode) on Python 2; plain str on Python 3 where `unicode` no
    # longer exists.
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    if not isinstance(value, string_types):
        # Bug fix: report the value's *name* -- matching both the docstring
        # and the empty-value branch below -- rather than the raw value.
        errors_list.append('({}) {} must be a string'.format(subject, value_name))
        return False
    elif not value:
        errors_list.append('({}) {} cannot be empty'.format(subject, value_name))
        return False
    else:
        return True
| [
11748,
33918,
201,
198,
11748,
28686,
201,
198,
11748,
302,
201,
198,
6738,
28686,
13,
6978,
1330,
823,
6978,
201,
198,
6738,
640,
1330,
3993,
201,
198,
201,
198,
6738,
14833,
62,
16794,
364,
13,
8356,
13,
1789,
1330,
2251,
62,
82,
... | 2.895981 | 1,269 |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from lino_book.projects.docs.settings.demo import *
# Instantiate the Lino SITE object for this settings module; `Site` comes in
# via the star import from the demo settings above.
SITE = Site(globals())
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
300,
2879,
62,
2070,
13,
42068,
13,
31628,
13,
33692,
13,
9536,
78,
1330,
1635,
198,
198,
50,
... | 2.660377 | 53 |
import db
from flask import session
USERNAME_COOKIE = 'username'
ACCESS_TOKEN_COOKIE = 'access_token'
ACCESS_TOKEN_SECRET_COOKIE = 'token_secret'
def is_same(target_user):
    """Checks that the logged in user is the same user as target_user."""
    if not is_logged_in():
        return False
    # is_logged_in() guarantees both token cookies are present in the session.
    token_matches = target_user['access_token'] == session[ACCESS_TOKEN_COOKIE]
    secret_matches = target_user['access_token_secret'] == session[ACCESS_TOKEN_SECRET_COOKIE]
    return token_matches and secret_matches
def is_logged_in():
    """Checks if someone, anyone, is logged in."""
    if USERNAME_COOKIE not in session:
        return False
    user = db.get_db().users.find_one({'_id' : session[USERNAME_COOKIE]})
    if not user:
        # Stale username cookie: clear the session state.
        unset()
        return False
    has_both_tokens = (ACCESS_TOKEN_COOKIE in session
                       and ACCESS_TOKEN_SECRET_COOKIE in session)
    if not has_both_tokens:
        return False
    return (user['access_token'] == session[ACCESS_TOKEN_COOKIE]
            and user['access_token_secret'] == session[ACCESS_TOKEN_SECRET_COOKIE])
| [
11748,
20613,
198,
198,
6738,
42903,
1330,
6246,
628,
198,
29904,
20608,
62,
34,
15308,
10008,
796,
705,
29460,
6,
198,
26861,
7597,
62,
10468,
43959,
62,
34,
15308,
10008,
796,
705,
15526,
62,
30001,
6,
198,
26861,
7597,
62,
10468,
4... | 2.45614 | 399 |
import torch
import torch.optim as optim
import torch.multiprocessing
from torch.cuda.amp import GradScaler
from torch.cuda.amp import autocast as autocast # 自动混合精度,提高计算速度,降低显存使用
from torch.utils.data import DataLoader
from torchvision import transforms
from models.model_with_tcn_big import Model
from models.loss_kernels import DICE_loss
from models.loss_ctc import ctc_loss
from dataset.data_utils_kernel_box_from_dgrl import MyDataset, AlignCollate
from dataset.hwdb2_0_chars import char_dict, char_set
from utils.logger import logger
from utils.config import Config
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
scaler = GradScaler()
torch.multiprocessing.set_sharing_strategy('file_system') # 设置共享CPU张量的策略
device = torch.device('cuda')
config = Config('config.yml')
# dataloader
train_dataset = MyDataset(config.train_data_dir, char_dict, data_shape=1600, n=2, m=0.6, transform=transforms.ToTensor(), max_text_length=80)
eval_dataset = MyDataset(config.eval_data_dir, char_dict, data_shape=1600, n=2, m=0.6, transform=transforms.ToTensor(), max_text_length=80, is_train=False)
train_dataloader = DataLoader(dataset=train_dataset, collate_fn=AlignCollate(), batch_size=config.train_batch_size, shuffle=True, num_workers=config.num_workers, pin_memory=True)
eval_dataloader = DataLoader(dataset=eval_dataset, collate_fn=AlignCollate(), batch_size=config.eval_batch_size, shuffle=True, num_workers=config.num_workers, pin_memory=True)
train_steps = len(train_dataloader)
eval_steps = len(eval_dataloader)
print("Training steps: %d, evaluation steps: %d" % (train_steps, eval_steps))
model = Model(num_classes=config.num_classes, line_height=config.line_height, is_transformer=True, is_TCN=True).to(device)
criterion_kernel = DICE_loss().to(device)
criterion_char = torch.nn.CTCLoss(blank=0, zero_infinity=True).to(device)
max_CR = 0
if __name__ == '__main__':
# pre_dict = torch.load(
# './output/with_tcn_big_icdar/model_new1_epoch_13_loss_char_all_0.3923_loss_kernel_all_0.1185_AR_0.911840_CR_0.920156.pth')
# pre_dict.pop('DenseNet_layer.classifier.weight')
# pre_dict.pop('DenseNet_layer.classifier.bias')
# model_dict = model.state_dict()
# pre_dict = {k: v for k, v in pre_dict.items() if k in model_dict}
# model_dict.update(pre_dict)
# model.load_state_dict(model_dict)
# model.load_state_dict(torch.load(
# r'./output/with_tcn_big_hwdb_all_t'
# r'/model_c_epoch_50_loss_char_all_0.0642_loss_kernel_all_0.1226_AR_0.987677_CR_0.990463.pth'))
# eval(model, eval_data, criterion_kernel, criterion_char, 0,is_save=False)
train()
| [
11748,
28034,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
11748,
28034,
13,
16680,
541,
305,
919,
278,
198,
6738,
28034,
13,
66,
15339,
13,
696,
1330,
17701,
3351,
36213,
198,
6738,
28034,
13,
66,
15339,
13,
696,
1330,
1960,
420,
45... | 2.454968 | 1,077 |
# Copyright 2017 Lajos Gerecs, Janos Czentye
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import itertools
import json
import logging
import os
from pprint import pformat
from unittest import BaseTestSuite
from testframework.generator.generator import DEFAULT_SEED
from testframework.runner import RunnableTestCaseInfo
from testframework.testcases.basic import BasicSuccessfulTestCase
log = logging.getLogger()
class DynamicallyGeneratedTestCase(BasicSuccessfulTestCase):
  """
  Test case class which generates the required resource and request files for
  the actual testcase on the fly based on the given parameters.

  Example config:

  "test": {
    "module": "testframework.testcases",
    "class": "DynamicallyGeneratedTestCase",
    "request_cfg": {
      "generator": "eight_loop_requests",
      "abc_nf_types_len": 10,
      "seed": 0,
      "eightloops": 3
    },
    "topology_cfg": {
      "generator": "xxx",
      "seed": 15,
      ...
    }
  }
  """
  # Module that hosts the generator functions referenced from the configs
  GENERATOR_MODULE = "testframework.generator.generator"
  # Config key that names the generator function inside a request/topology cfg
  GENERATOR_ENTRY_NAME = "generator"
  # File names the generated NFFGs are dumped under
  REQUEST_FILE_NAME = "gen-request.nffg"
  TOPOLOGY_FILE_NAME = "gen-topology.nffg"

  def __init__ (self, request_cfg=None, topology_cfg=None, **kwargs):
    """
    :param request_cfg: parameters driving request generation
    :type request_cfg: dict
    :param topology_cfg: parameters driving topology generation
    :type topology_cfg: dict
    :type kwargs: dict
    """
    super(DynamicallyGeneratedTestCase, self).__init__(**kwargs)
    self.request_cfg = request_cfg
    self.new_req = False
    self.topology_cfg = topology_cfg
    self.new_topo = False
    log.debug("request_cfg:\n%s\ntopology_cfg:\n%s"
              % (pformat(self.request_cfg, indent=2),
                 pformat(self.topology_cfg, indent=2)))

  @classmethod
  def __generate_nffg (cls, cfg):
    """
    Look up the generator function named in ``cfg`` and return its NFFG.

    :type cfg: dict
    :rtype: :any:`NFFG`
    """
    # If config is not empty and testcase is properly configured
    if not cfg or cls.GENERATOR_ENTRY_NAME not in cfg:
      return None
    params = cfg.copy()
    try:
      generator_func = getattr(importlib.import_module(cls.GENERATOR_MODULE),
                               params.pop(cls.GENERATOR_ENTRY_NAME))
      return generator_func(**params) if generator_func else None
    except AttributeError as e:
      # Format the exception itself: the Python-2-only `.message` attribute
      # breaks under Python 3 and is deprecated even on 2.x.
      raise Exception("Generator function is not found: %s" % e)

  def dump_generated_nffg (self, cfg, file_name):
    """
    Generate an NFFG from ``cfg`` and dump it into the testcase directory.

    :type cfg: dict
    :type file_name: str
    :return: generation was successful
    :rtype: bool
    """
    nffg = self.__generate_nffg(cfg=cfg)
    if nffg is None:
      # Report failure explicitly; previously this fell through and
      # returned None although the docstring promises a bool.
      return False
    req_file_name = os.path.join(self.test_case_info.full_testcase_path,
                                 file_name)
    with open(req_file_name, "w") as f:
      # f.write(nffg.dump_to_json())
      json.dump(nffg.dump_to_json(), f, indent=2, sort_keys=True)
    return True
class DynamicTestGenerator(BaseTestSuite):
  """
  Special TestSuite class which populate itself with TestCases based on the
  given parameters.

  Example config:

  "test": {
    "module": "testframework.testcases",
    "class": "DynamicTestGenerator",
    "full_combination": true,
    "num_of_requests": 3,
    "num_of_topos": 5,
    "testcase_cfg": {
      "module": "testframework.testcases",
      "class": "DynamicallyGeneratedTestCase",
      "request_cfg": {
        "generator": "eight_loop_requests",
        "seed": 0
      },
      "topology_cfg": {
        "generator": "xxx",
        "seed": 0
      }
    }
  }
  """
  DEFAULT_TESTCASE_CLASS = DynamicallyGeneratedTestCase
  REQUEST_CFG_NAME = "request_cfg"
  TOPOLOGY_CFG_NAME = "topology_cfg"
  SEED_NAME = "seed"

  def __init__ (self, test_case_info, command_runner, testcase_cfg=None,
                full_combination=False, num_of_requests=1, num_of_topos=1,
                **kwargs):
    """
    :type test_case_info: RunnableTestCaseInfo
    :type command_runner: ESCAPECommandRunner
    :type testcase_cfg: dict
    :param full_combination: generate the Cartesian product of seeds
    :type full_combination: bool
    :type num_of_requests: int
    :type num_of_topos: int
    """
    super(DynamicTestGenerator, self).__init__(kwargs.get("tests", ()))
    self.test_case_info = test_case_info
    self.command_runner = command_runner
    self.testcase_cfg = testcase_cfg
    self.full_combination = full_combination
    self.num_of_requests = num_of_requests
    self.num_of_topos = num_of_topos
    # NOTE(review): _create_test_cases is not defined in this class here;
    # presumably provided elsewhere — confirm before refactoring further.
    self._create_test_cases()

  def __get_seed_iter (self, cfg_name, amount):
    """
    Build the seed iterator for one config entry (request or topology).

    If the entry defines a ``seed`` list it is used as the seed interval;
    a scalar ``seed`` marks the start of a ``xrange`` of length ``amount``;
    a missing ``seed`` falls back to the default seed. When generation is
    disabled for the entry (``amount`` <= 0 or no such cfg), a one-element
    ``(None,)`` tuple is returned to feed the pair generator.

    :param cfg_name: testcase_cfg key ("request_cfg" / "topology_cfg")
    :type cfg_name: str
    :param amount: number of seeds to draw for this entry
    :type amount: int
    :return: iterable of seed values
    """
    if amount > 0 and cfg_name in self.testcase_cfg:
      # If seed value is given
      if self.SEED_NAME in self.testcase_cfg[cfg_name]:
        seed = self.testcase_cfg[cfg_name][self.SEED_NAME]
        # If seed list is explicitly given
        if isinstance(seed, list):
          return iter(seed)
        else:
          return xrange(seed, seed + amount)
      else:
        # Use default seed value for seed list
        return xrange(DEFAULT_SEED, DEFAULT_SEED + amount)
    else:
      # Use specific tuple with None value to feed the pair generator function
      return (None,)

  def _get_seed_generator (self):
    """
    Return an iterator which generates the tuple (request, topology) of seed
    values for test cases based on the config values:
      * default seed value which can be a number or a list of seed values
      * number of generated request/topology
      * test generation mode (full_combination or ordered pairs of request/topo)

    If the seed value is a number, this generator considers it as the first
    value of the used seed interval.
    If the seed value is a list, this generator considers it as the seed
    interval and the number_of_* parameters mark out the used values from the
    beginning of the seed intervals.

    Based on the request and topology seed intervals this function generates
    the pairs of seeds using the full_combination flag.

    Generation modes (full_combination, num_of_requests, num_of_topos):
      False,  0,   0,   --> 1 testcase WITHOUT generation
      False,  N>0, 0    --> 1 testcase with ONLY request generation
      False,  0,   M>0  --> 1 testcase with ONLY topology generation
      False,  N>0, M>0  --> min(N, M) testcase with generated ordered pairs
      ---------------------------------------------------------------------
      True,   0,   0,   --> 1 testcase WITHOUT generation
      True,   N>0, 0    --> N testcase with ONLY request generation
      True,   0,   M>0  --> M testcase with ONLY topology generation
      True,   N>0, M>0  --> N x M testcase with generated input (cartesian)

    :return: iterator
    """
    # If config is missing, return with no seed pairs
    if not self.testcase_cfg:
      return ()
    # Same selection logic for both entries, factored into one helper
    seed_iterators = [
      self.__get_seed_iter(self.REQUEST_CFG_NAME, self.num_of_requests),
      self.__get_seed_iter(self.TOPOLOGY_CFG_NAME, self.num_of_topos)]
    if self.full_combination:
      # Generate Cartesian product
      return itertools.product(*seed_iterators)
    else:
      # Generate pairs based on the value position in the lists
      return itertools.izip(*seed_iterators)
| [
2,
15069,
2177,
406,
1228,
418,
402,
567,
6359,
11,
2365,
418,
327,
89,
3787,
68,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.575421 | 3,149 |
from django.contrib.auth.mixins import LoginRequiredMixin as LoginRequired
from django.contrib.auth import logout, authenticate
from django.contrib.auth import update_session_auth_hash
from django.views.generic import FormView, DeleteView
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from django.conf import settings
from .forms import UpdateUserInfoForm, LoginForm, SignupForm
from .models import User
class UpdateSettingsView(LoginRequired, FormView):
    """Lets the user update his settings.

    Uses the alias ``LoginRequired`` that the import above binds
    (``LoginRequiredMixin as LoginRequired``); the previous reference to
    the unaliased name raised NameError at import time.
    """
    template_name = "humans/update_user_form.html"
    form_class = UpdateUserInfoForm
    success_url = reverse_lazy("humans_update")
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
19816,
1040,
1330,
23093,
37374,
35608,
259,
355,
23093,
37374,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
2604,
448,
11,
8323,
5344,
198,
6738,
42625,
14208,
13,
3642,
822... | 3.414747 | 217 |
# Copyright (c) 2012, 2013 Ricardo Andrade
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import stats, special
import scipy as sp
import gp_transformations
from noise_distributions import NoiseDistribution
from scipy import stats, integrate
from scipy.special import gammaln, gamma
class StudentT(NoiseDistribution):
    r"""
    Student-t likelihood.

    For nomenclature see Bayesian Data Analysis 2003 p576

    .. math::
        p(y_{i}|\lambda(f_{i})) = \frac{\Gamma\left(\frac{v+1}{2}\right)}{\Gamma\left(\frac{v}{2}\right)\sqrt{v\pi\sigma^{2}}}\left(1 + \frac{1}{v}\left(\frac{(y_{i} - f_{i})^{2}}{\sigma^{2}}\right)\right)^{\frac{-v+1}{2}}
    """
    # NOTE: a stray @property decorator previously wrapped pdf_link, which
    # made the 3-argument method inaccessible as a callable; removed.
    def pdf_link(self, link_f, y, extra_data=None):
        r"""
        Likelihood function given link(f)

        .. math::
            p(y_{i}|\lambda(f_{i})) = \frac{\Gamma\left(\frac{v+1}{2}\right)}{\Gamma\left(\frac{v}{2}\right)\sqrt{v\pi\sigma^{2}}}\left(1 + \frac{1}{v}\left(\frac{(y_{i} - \lambda(f_{i}))^{2}}{\sigma^{2}}\right)\right)^{\frac{-v+1}{2}}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param extra_data: extra_data which is not used in student t distribution
        :returns: likelihood evaluated for this point
        :rtype: float
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        e = y - link_f
        # Careful: gamma(big_number) is infinity — work with gammaln instead
        objective = ((np.exp(gammaln((self.v + 1)*0.5) - gammaln(self.v * 0.5))
                      / (np.sqrt(self.v * np.pi * self.sigma2)))
                     * ((1 + (1./float(self.v))*((e**2)/float(self.sigma2)))**(-0.5*(self.v + 1)))
                     )
        return np.prod(objective)

    def logpdf_link(self, link_f, y, extra_data=None):
        r"""
        Log Likelihood Function given link(f)

        .. math::
            \ln p(y_{i}|\lambda(f_{i})) = \ln \Gamma\left(\frac{v+1}{2}\right) - \ln \Gamma\left(\frac{v}{2}\right) - \ln \sqrt{v \pi\sigma^{2}} - \frac{v+1}{2}\ln \left(1 + \frac{1}{v}\left(\frac{(y_{i} - \lambda(f_{i}))^{2}}{\sigma^{2}}\right)\right)

        :param link_f: latent variables (link(f))
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param extra_data: extra_data which is not used in student t distribution
        :returns: likelihood evaluated for this point
        :rtype: float
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        e = y - link_f
        # float() instead of the np.float alias removed in NumPy >= 1.20
        objective = (+ gammaln((self.v + 1) * 0.5)
                     - gammaln(self.v * 0.5)
                     - 0.5*np.log(self.sigma2 * self.v * np.pi)
                     - 0.5*(self.v + 1)*np.log(1 + (1/float(self.v))*((e**2)/self.sigma2))
                     )
        return np.sum(objective)

    def dlogpdf_dlink(self, link_f, y, extra_data=None):
        r"""
        Gradient of the log likelihood function at y, given link(f) w.r.t link(f)

        .. math::
            \frac{d \ln p(y_{i}|\lambda(f_{i}))}{d\lambda(f)} = \frac{(v+1)(y_{i}-\lambda(f_{i}))}{(y_{i}-\lambda(f_{i}))^{2} + \sigma^{2}v}

        :param link_f: latent variables (f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param extra_data: extra_data which is not used in student t distribution
        :returns: gradient of likelihood evaluated at points
        :rtype: Nx1 array
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        e = y - link_f
        grad = ((self.v + 1) * e) / (self.v * self.sigma2 + (e**2))
        return grad

    def d2logpdf_dlink2(self, link_f, y, extra_data=None):
        r"""
        Hessian at y, given link(f), w.r.t link(f)
        i.e. second derivative logpdf at y given link(f_i) and link(f_j) w.r.t link(f_i) and link(f_j)
        The hessian will be 0 unless i == j

        .. math::
            \frac{d^{2} \ln p(y_{i}|\lambda(f_{i}))}{d^{2}\lambda(f)} = \frac{(v+1)((y_{i}-\lambda(f_{i}))^{2} - \sigma^{2}v)}{((y_{i}-\lambda(f_{i}))^{2} + \sigma^{2}v)^{2}}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param extra_data: extra_data which is not used in student t distribution
        :returns: Diagonal of hessian matrix (second derivative of likelihood evaluated at points f)
        :rtype: Nx1 array

        .. Note::
            Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
            (the distribution for y_i depends only on link(f_i) not on link(f_(j!=i))
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        e = y - link_f
        hess = ((self.v + 1)*(e**2 - self.v*self.sigma2)) / ((self.sigma2*self.v + e**2)**2)
        return hess

    def d3logpdf_dlink3(self, link_f, y, extra_data=None):
        r"""
        Third order derivative log-likelihood function at y given link(f) w.r.t link(f)

        .. math::
            \frac{d^{3} \ln p(y_{i}|\lambda(f_{i}))}{d^{3}\lambda(f)} = \frac{-2(v+1)((y_{i} - \lambda(f_{i}))^3 - 3(y_{i} - \lambda(f_{i})) \sigma^{2} v))}{((y_{i} - \lambda(f_{i})) + \sigma^{2} v)^3}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param extra_data: extra_data which is not used in student t distribution
        :returns: third derivative of likelihood evaluated at points f
        :rtype: Nx1 array
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        e = y - link_f
        d3lik_dlink3 = ( -(2*(self.v + 1)*(-e)*(e**2 - 3*self.v*self.sigma2)) /
                         ((e**2 + self.sigma2*self.v)**3)
                       )
        return d3lik_dlink3

    def dlogpdf_link_dvar(self, link_f, y, extra_data=None):
        r"""
        Gradient of the log-likelihood function at y given f, w.r.t variance parameter (t_noise)

        .. math::
            \frac{d \ln p(y_{i}|\lambda(f_{i}))}{d\sigma^{2}} = \frac{v((y_{i} - \lambda(f_{i}))^{2} - \sigma^{2})}{2\sigma^{2}(\sigma^{2}v + (y_{i} - \lambda(f_{i}))^{2})}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param extra_data: extra_data which is not used in student t distribution
        :returns: derivative of likelihood evaluated at points f w.r.t variance parameter
        :rtype: float
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        e = y - link_f
        dlogpdf_dvar = self.v*(e**2 - self.sigma2)/(2*self.sigma2*(self.sigma2*self.v + e**2))
        return np.sum(dlogpdf_dvar)

    def dlogpdf_dlink_dvar(self, link_f, y, extra_data=None):
        r"""
        Derivative of the dlogpdf_dlink w.r.t variance parameter (t_noise)

        .. math::
            \frac{d}{d\sigma^{2}}(\frac{d \ln p(y_{i}|\lambda(f_{i}))}{df}) = \frac{-2\sigma v(v + 1)(y_{i}-\lambda(f_{i}))}{(y_{i}-\lambda(f_{i}))^2 + \sigma^2 v)^2}

        :param link_f: latent variables link_f
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param extra_data: extra_data which is not used in student t distribution
        :returns: derivative of likelihood evaluated at points f w.r.t variance parameter
        :rtype: Nx1 array
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        e = y - link_f
        dlogpdf_dlink_dvar = (self.v*(self.v+1)*(-e))/((self.sigma2*self.v + e**2)**2)
        return dlogpdf_dlink_dvar

    def d2logpdf_dlink2_dvar(self, link_f, y, extra_data=None):
        r"""
        Gradient of the hessian (d2logpdf_dlink2) w.r.t variance parameter (t_noise)

        .. math::
            \frac{d}{d\sigma^{2}}(\frac{d^{2} \ln p(y_{i}|\lambda(f_{i}))}{d^{2}f}) = \frac{v(v+1)(\sigma^{2}v - 3(y_{i} - \lambda(f_{i}))^{2})}{(\sigma^{2}v + (y_{i} - \lambda(f_{i}))^{2})^{3}}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param extra_data: extra_data which is not used in student t distribution
        :returns: derivative of hessian evaluated at points f and f_j w.r.t variance parameter
        :rtype: Nx1 array
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        e = y - link_f
        d2logpdf_dlink2_dvar = ( (self.v*(self.v+1)*(self.sigma2*self.v - 3*(e**2)))
                                 / ((self.sigma2*self.v + (e**2))**3)
                               )
        return d2logpdf_dlink2_dvar

    def _predictive_variance_analytical(self, mu, sigma, predictive_mean=None):
        """
        Compute predictive variance of student_t*normal p(y*|f*)p(f*)

        Need to find what the variance is at the latent points for a student t*normal p(y*|f*)p(f*)
        (((g((v+1)/2))/(g(v/2)*s*sqrt(v*pi)))*(1+(1/v)*((y-f)/s)^2)^(-(v+1)/2))
        *((1/(s*sqrt(2*pi)))*exp(-(1/(2*(s^2)))*((y-f)^2)))
        """
        # FIXME: Not correct
        # We want the variance around test points y which comes from int p(y*|f*)p(f*) df*
        # Var(y*) = Var(E[y*|f*]) + E[Var(y*|f*)]
        # Since we are given f* (mu) which is our mean (expected) value of y*|f* then the variance is the variance around this
        # Which was also given to us as (var)
        # We also need to know the expected variance of y* around samples f*, this is the variance of the student t distribution
        # However the variance of the student t distribution is not dependent on f, only on sigma and the degrees of freedom
        true_var = 1/(1/sigma**2 + 1/self.variance)
        return true_var

    def _predictive_mean_analytical(self, mu, sigma):
        """
        Compute mean of the prediction
        """
        # FIXME: Not correct
        return mu

    def samples(self, gp):
        """
        Returns a set of samples of observations based on a given value of the latent variable.

        :param gp: latent variable
        """
        orig_shape = gp.shape
        gp = gp.flatten()
        # FIXME: Very slow as we are computing a new random variable per input!
        # Can't get it to sample all at the same time
        # student_t_samples = np.array([stats.t.rvs(self.v, self.gp_link.transf(gpj),scale=np.sqrt(self.sigma2), size=1) for gpj in gp])
        dfs = np.ones_like(gp)*self.v
        scales = np.ones_like(gp)*np.sqrt(self.sigma2)
        student_t_samples = stats.t.rvs(dfs, loc=self.gp_link.transf(gp),
                                        scale=scales)
        return student_t_samples.reshape(orig_shape)
| [
2,
15069,
357,
66,
8,
2321,
11,
2211,
38847,
843,
27585,
198,
2,
49962,
739,
262,
347,
10305,
513,
12,
565,
682,
5964,
357,
3826,
38559,
24290,
13,
14116,
8,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
... | 2.040763 | 5,348 |
# Faça um programa que tenha uma função chamada contador(), que receba três parâmetros: início, fim e passo e realize
# a contagem.
# Seu programa tem que realizar três contagens através da função criada:
# a) De 1 até 10, de 1 em 1
# b) De 10 até 0, de 2 em 2
# c) Uma contagem personalizada.
from time import sleep as pausa
def contador(inicio, fim, passo):
    """Conta de `inicio` até `fim` (inclusive), de `passo` em `passo`.

    A negative step is mirrored to positive and a zero step is coerced to 1;
    counting direction is inferred from inicio/fim. Prints the sequence
    followed by 'FIM!' and returns None.
    """
    # Normalize the step so callers may pass 0 or negative values.
    if passo < 0:
        passo = -passo
    elif passo == 0:
        passo = 1
    print(f'Contagem de {inicio} até {fim} de {passo} em {passo}:')
    if inicio <= fim:
        faixa = range(inicio, fim + 1, passo)
    else:
        faixa = range(inicio, fim - 1, -passo)
    for numero in faixa:
        print(numero, end=' ', flush=True)
    print('FIM!')


def _contagem_personalizada():
    """Reads início/fim/passo from the user and runs the custom count."""
    print('Contagem personalizada:')
    begin = int(input('Inicio: '))
    end = int(input('Fim: '))
    step = int(input('Passo: '))
    print('=-' * 30)
    contador(begin, end, step)


# Guarding the interactive part keeps the module importable without
# triggering prints/input() (and makes the functions testable).
if __name__ == '__main__':
    print('=-' * 30)
    contador(1, 10, 1)
    print('=-' * 30)
    contador(10, 0, 2)
    print('=-' * 30)
    _contagem_personalizada()
| [
2,
18350,
50041,
23781,
1430,
64,
8358,
3478,
3099,
334,
2611,
1257,
16175,
28749,
442,
321,
4763,
542,
7079,
22784,
8358,
1407,
7012,
491,
25792,
82,
1582,
22940,
4164,
4951,
25,
287,
8836,
66,
952,
11,
277,
320,
304,
1208,
78,
304,
... | 2.367647 | 272 |
# -*- coding: utf-8 -*-
import click
@click.group()
def cli():
    """Root command group; the star-imports below register subcommands on it.

    Restores the group function that the decorator must wrap — previously the
    decorator was followed directly by an import, which is a SyntaxError —
    and that the __main__ guard below invokes.
    """
    pass


from .merge import *
from .dedup import *
from .compile_step_stats import *
from .stats_percentage import *
from .sum_stats import *
from .bt2_log_to_csv import *

if __name__ == "__main__":
    cli()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
3904,
198,
198,
31,
12976,
13,
8094,
3419,
198,
198,
6738,
764,
647,
469,
220,
220,
220,
1330,
1635,
198,
6738,
764,
9395,
929,
1330,
1635,
198,
6738,
764,
5589... | 2.539216 | 102 |
#!/usr/bin/env python
'''
A solution to a ROSALIND bioinformatics problem.
Problem Title: Finding a Shared Motif
Rosalind ID: LCSM
Rosalind #: 014
URL: http://rosalind.info/problems/lcsm/
'''
from scripts import ReadFASTA
def LongestSubstring(string_list):
    """Return the longest substring of string_list[0] shared by all strings.

    Scans substrings of the first string for every start index, longest
    first, keeping the longest candidate found in every member. Uses
    range() instead of the Python-2-only xrange(), which raises NameError
    under Python 3 (range behaves identically for this iteration).
    """
    longest = ''
    first = string_list[0]
    for start_index in range(len(first)):
        # The longest substring starting here has length len(first)-start,
        # so once that cannot beat the current best we are done entirely.
        if len(first) - start_index <= len(longest):
            break
        for end_index in range(len(first), start_index, -1):
            # Break if the length becomes too small, as it will only get smaller.
            if end_index - start_index <= len(longest):
                break
            if CheckSubstring(first[start_index:end_index], string_list):
                longest = first[start_index:end_index]
    return longest


def CheckSubstring(find_string, string_list):
    """Return True iff find_string occurs in every string of string_list.

    (The explicit length comparison of the original was redundant: the `in`
    test already fails for a needle longer than the haystack.)
    """
    return all(find_string in string for string in string_list)
if __name__ == '__main__':
    # Read the FASTA records, keep only the sequences (entry[1]), find the
    # longest common substring, print it and save it for submission.
    fasta_list = ReadFASTA('data/rosalind_lcsm.txt')
    dna = [fasta[1] for fasta in fasta_list]
    lcsm = LongestSubstring(dna)
    # print() call form works on both Python 2 and 3 for a single argument;
    # the old `print lcsm` statement is a SyntaxError under Python 3.
    print(lcsm)
    with open('output/014_LCSM.txt', 'w') as output_data:
        output_data.write(lcsm)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
32,
4610,
284,
257,
48263,
1847,
12115,
13401,
259,
18982,
873,
1917,
13,
198,
198,
40781,
11851,
25,
27063,
257,
39403,
6543,
361,
198,
35740,
282,
521,
4522,
25,
38217,... | 2.738866 | 494 |
from tools2Dgauss import *
from figPlots import *
### PLOTS ARE LISTED FIRST
### COMPUTATIONS
## make 2D chi2 image as a function of X position and counts
def gauss2Dastrom(muX, muY, alpha, A, Bkgd, Xpixels, Ypixels,
                  double_gaussian=False):
    """2D circular Gaussian source on a constant background.

    The former hard-coded ``if (1):`` debug toggle (with a dead alternative
    branch) is now an explicit keyword; the default reproduces the original
    single-Gaussian behavior.

    :param muX, muY: source center coordinates
    :param alpha: Gaussian width (sigma) in pixels
    :param A: total source counts (the profile integrates to A)
    :param Bkgd: constant background level per pixel
    :param Xpixels, Ypixels: pixel coordinate arrays (same shape)
    :param double_gaussian: if True, use a two-component profile with a
        1:10 amplitude ratio and sigma2 = 2*sigma1
    :return: (image, sourceImage) — background + source, and source only
    """
    r2 = (Xpixels - muX)**2 + (Ypixels - muY)**2
    # Start from a flat background image (force float so += works cleanly)
    image = np.full(r2.shape, Bkgd, dtype=float)
    if not double_gaussian:
        # single circular Gaussian, area-normalized to A
        sourceImage = A*np.exp(-r2/2/alpha**2) / (2*math.pi*alpha**2)
    else:
        # double Gaussian: 1:10 amplitude ratio and sigma2 = 2*sigma1
        sourceImage = 0.909*A*np.exp(-r2/2/alpha**2) / (2*math.pi*alpha**2)
        alpha2 = alpha*2
        sourceImage += 0.091*A*np.exp(-r2/2/alpha2**2) / (2*math.pi*alpha2**2)
    image += sourceImage
    return image, sourceImage
| [
6738,
4899,
17,
35,
4908,
1046,
1330,
1635,
198,
6738,
2336,
3646,
1747,
1330,
1635,
628,
198,
21017,
9297,
33472,
15986,
39498,
1961,
31328,
628,
198,
21017,
24301,
3843,
18421,
220,
628,
198,
2235,
787,
362,
35,
33166,
17,
2939,
355,
... | 2.312821 | 390 |
# pylint: disable=no-member
import torch
from models import AutoEncoder
PATH_TO_EMBEDDER = 'neural/checkpoints/ae-512-224x224-loss-0.024.pth'
| [
2,
279,
2645,
600,
25,
15560,
28,
3919,
12,
19522,
198,
198,
11748,
28034,
198,
198,
6738,
4981,
1330,
11160,
27195,
12342,
198,
198,
34219,
62,
10468,
62,
3620,
33,
1961,
14418,
796,
705,
710,
1523,
14,
9122,
13033,
14,
3609,
12,
2... | 2.517241 | 58 |
import os
import wx
import time
| [
11748,
28686,
198,
11748,
266,
87,
198,
11748,
640,
628
] | 3.3 | 10 |
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateAutonomousContainerDatabaseDetails(object):
"""
Describes the required parameters for the creation of an Autonomous Container Database.
"""
#: A constant which can be used with the service_level_agreement_type property of a CreateAutonomousContainerDatabaseDetails.
#: This constant has a value of "STANDARD"
SERVICE_LEVEL_AGREEMENT_TYPE_STANDARD = "STANDARD"
#: A constant which can be used with the patch_model property of a CreateAutonomousContainerDatabaseDetails.
#: This constant has a value of "RELEASE_UPDATES"
PATCH_MODEL_RELEASE_UPDATES = "RELEASE_UPDATES"
#: A constant which can be used with the patch_model property of a CreateAutonomousContainerDatabaseDetails.
#: This constant has a value of "RELEASE_UPDATE_REVISIONS"
PATCH_MODEL_RELEASE_UPDATE_REVISIONS = "RELEASE_UPDATE_REVISIONS"
def __init__(self, **kwargs):
"""
Initializes a new CreateAutonomousContainerDatabaseDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this CreateAutonomousContainerDatabaseDetails.
:type display_name: str
:param service_level_agreement_type:
The value to assign to the service_level_agreement_type property of this CreateAutonomousContainerDatabaseDetails.
Allowed values for this property are: "STANDARD"
:type service_level_agreement_type: str
:param autonomous_exadata_infrastructure_id:
The value to assign to the autonomous_exadata_infrastructure_id property of this CreateAutonomousContainerDatabaseDetails.
:type autonomous_exadata_infrastructure_id: str
:param compartment_id:
The value to assign to the compartment_id property of this CreateAutonomousContainerDatabaseDetails.
:type compartment_id: str
:param patch_model:
The value to assign to the patch_model property of this CreateAutonomousContainerDatabaseDetails.
Allowed values for this property are: "RELEASE_UPDATES", "RELEASE_UPDATE_REVISIONS"
:type patch_model: str
:param maintenance_window_details:
The value to assign to the maintenance_window_details property of this CreateAutonomousContainerDatabaseDetails.
:type maintenance_window_details: MaintenanceWindow
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateAutonomousContainerDatabaseDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this CreateAutonomousContainerDatabaseDetails.
:type defined_tags: dict(str, dict(str, object))
:param backup_config:
The value to assign to the backup_config property of this CreateAutonomousContainerDatabaseDetails.
:type backup_config: AutonomousContainerDatabaseBackupConfig
"""
self.swagger_types = {
'display_name': 'str',
'service_level_agreement_type': 'str',
'autonomous_exadata_infrastructure_id': 'str',
'compartment_id': 'str',
'patch_model': 'str',
'maintenance_window_details': 'MaintenanceWindow',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'backup_config': 'AutonomousContainerDatabaseBackupConfig'
}
self.attribute_map = {
'display_name': 'displayName',
'service_level_agreement_type': 'serviceLevelAgreementType',
'autonomous_exadata_infrastructure_id': 'autonomousExadataInfrastructureId',
'compartment_id': 'compartmentId',
'patch_model': 'patchModel',
'maintenance_window_details': 'maintenanceWindowDetails',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'backup_config': 'backupConfig'
}
self._display_name = None
self._service_level_agreement_type = None
self._autonomous_exadata_infrastructure_id = None
self._compartment_id = None
self._patch_model = None
self._maintenance_window_details = None
self._freeform_tags = None
self._defined_tags = None
self._backup_config = None
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this CreateAutonomousContainerDatabaseDetails.
The display name for the Autonomous Container Database.
:return: The display_name of this CreateAutonomousContainerDatabaseDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateAutonomousContainerDatabaseDetails.
The display name for the Autonomous Container Database.
:param display_name: The display_name of this CreateAutonomousContainerDatabaseDetails.
:type: str
"""
self._display_name = display_name
@property
def service_level_agreement_type(self):
"""
Gets the service_level_agreement_type of this CreateAutonomousContainerDatabaseDetails.
The service level agreement type of the Autonomous Container Database. The default is STANDARD. For a mission critical Autonomous Container Database, the specified Autonomous Exadata Infrastructure must be associated with a remote Autonomous Exadata Infrastructure.
Allowed values for this property are: "STANDARD"
:return: The service_level_agreement_type of this CreateAutonomousContainerDatabaseDetails.
:rtype: str
"""
return self._service_level_agreement_type
@service_level_agreement_type.setter
def service_level_agreement_type(self, service_level_agreement_type):
"""
Sets the service_level_agreement_type of this CreateAutonomousContainerDatabaseDetails.
The service level agreement type of the Autonomous Container Database. The default is STANDARD. For a mission critical Autonomous Container Database, the specified Autonomous Exadata Infrastructure must be associated with a remote Autonomous Exadata Infrastructure.
:param service_level_agreement_type: The service_level_agreement_type of this CreateAutonomousContainerDatabaseDetails.
:type: str
"""
allowed_values = ["STANDARD"]
if not value_allowed_none_or_none_sentinel(service_level_agreement_type, allowed_values):
raise ValueError(
"Invalid value for `service_level_agreement_type`, must be None or one of {0}"
.format(allowed_values)
)
self._service_level_agreement_type = service_level_agreement_type
@property
def autonomous_exadata_infrastructure_id(self):
"""
**[Required]** Gets the autonomous_exadata_infrastructure_id of this CreateAutonomousContainerDatabaseDetails.
The OCID of the Autonomous Exadata Infrastructure.
:return: The autonomous_exadata_infrastructure_id of this CreateAutonomousContainerDatabaseDetails.
:rtype: str
"""
return self._autonomous_exadata_infrastructure_id
@autonomous_exadata_infrastructure_id.setter
def autonomous_exadata_infrastructure_id(self, autonomous_exadata_infrastructure_id):
"""
Sets the autonomous_exadata_infrastructure_id of this CreateAutonomousContainerDatabaseDetails.
The OCID of the Autonomous Exadata Infrastructure.
:param autonomous_exadata_infrastructure_id: The autonomous_exadata_infrastructure_id of this CreateAutonomousContainerDatabaseDetails.
:type: str
"""
self._autonomous_exadata_infrastructure_id = autonomous_exadata_infrastructure_id
@property
def compartment_id(self):
"""
Gets the compartment_id of this CreateAutonomousContainerDatabaseDetails.
The `OCID`__ of the compartment containing the Autonomous Container Database.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this CreateAutonomousContainerDatabaseDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this CreateAutonomousContainerDatabaseDetails.
The `OCID`__ of the compartment containing the Autonomous Container Database.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this CreateAutonomousContainerDatabaseDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def patch_model(self):
"""
**[Required]** Gets the patch_model of this CreateAutonomousContainerDatabaseDetails.
Database Patch model preference.
Allowed values for this property are: "RELEASE_UPDATES", "RELEASE_UPDATE_REVISIONS"
:return: The patch_model of this CreateAutonomousContainerDatabaseDetails.
:rtype: str
"""
return self._patch_model
@patch_model.setter
def patch_model(self, patch_model):
"""
Sets the patch_model of this CreateAutonomousContainerDatabaseDetails.
Database Patch model preference.
:param patch_model: The patch_model of this CreateAutonomousContainerDatabaseDetails.
:type: str
"""
allowed_values = ["RELEASE_UPDATES", "RELEASE_UPDATE_REVISIONS"]
if not value_allowed_none_or_none_sentinel(patch_model, allowed_values):
raise ValueError(
"Invalid value for `patch_model`, must be None or one of {0}"
.format(allowed_values)
)
self._patch_model = patch_model
@property
def maintenance_window_details(self):
"""
Gets the maintenance_window_details of this CreateAutonomousContainerDatabaseDetails.
:return: The maintenance_window_details of this CreateAutonomousContainerDatabaseDetails.
:rtype: MaintenanceWindow
"""
return self._maintenance_window_details
@maintenance_window_details.setter
def maintenance_window_details(self, maintenance_window_details):
"""
Sets the maintenance_window_details of this CreateAutonomousContainerDatabaseDetails.
:param maintenance_window_details: The maintenance_window_details of this CreateAutonomousContainerDatabaseDetails.
:type: MaintenanceWindow
"""
self._maintenance_window_details = maintenance_window_details
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateAutonomousContainerDatabaseDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this CreateAutonomousContainerDatabaseDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateAutonomousContainerDatabaseDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this CreateAutonomousContainerDatabaseDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateAutonomousContainerDatabaseDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this CreateAutonomousContainerDatabaseDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateAutonomousContainerDatabaseDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this CreateAutonomousContainerDatabaseDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def backup_config(self):
"""
Gets the backup_config of this CreateAutonomousContainerDatabaseDetails.
:return: The backup_config of this CreateAutonomousContainerDatabaseDetails.
:rtype: AutonomousContainerDatabaseBackupConfig
"""
return self._backup_config
@backup_config.setter
def backup_config(self, backup_config):
"""
Sets the backup_config of this CreateAutonomousContainerDatabaseDetails.
:param backup_config: The backup_config of this CreateAutonomousContainerDatabaseDetails.
:type: AutonomousContainerDatabaseBackupConfig
"""
self._backup_config = backup_config
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
15069,
357,
66,
8,
1584,
11,
12131,
11,
18650,
290,
14,
273,
663,
29116,
13,
220,
1439,
2489,
10395,
13,
198,
2,
770,
3788,
318,
10668,
12,
36612,
284,
345,
739,
262,
14499,
2448,
33532,
1... | 2.800615 | 5,206 |
import torch, math
import torch.nn as nn
# B: batch size
# T: max sequence length
# E: word embedding size
# C: conn embeddings size
# H: hidden size
# Y: output size
# N_dir: num directions
# N_layer: num layers
# L_i: length of sequence i
################
# Model Layers #
################
################
# Input Layers #
################
| [
11748,
28034,
11,
10688,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
2,
347,
25,
15458,
2546,
198,
2,
309,
25,
3509,
8379,
4129,
198,
2,
412,
25,
1573,
11525,
12083,
2546,
198,
2,
327,
25,
48260,
11525,
67,
654,
2546,
19... | 3.311321 | 106 |
from os.path import basename
import torch
from commode_utils.callback import PrintEpochResultCallback, UploadCheckpointCallback
from omegaconf import DictConfig
from pytorch_lightning import LightningModule, LightningDataModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
| [
6738,
28686,
13,
6978,
1330,
1615,
12453,
198,
198,
11748,
28034,
198,
6738,
725,
1098,
62,
26791,
13,
47423,
1330,
12578,
13807,
5374,
23004,
47258,
11,
36803,
9787,
4122,
47258,
198,
6738,
267,
28917,
7807,
69,
1330,
360,
713,
16934,
... | 3.927835 | 97 |
import torch
import numpy as np
from scipy import stats
import pandas as pd
#when not using ranked output i.e. not explaining the outputs (therefore exlaining the z dimension or mu)
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
9756,
198,
11748,
19798,
292,
355,
279,
67,
628,
628,
198,
2,
12518,
407,
1262,
10307,
5072,
1312,
13,
68,
13,
407,
11170,
262,
23862,
357,
8117,
754,
... | 3.518519 | 54 |
import pandas as pd
import os
import wget
import pathlib
from pathlib import Path
PROJECT_DIR = Path(__file__).parent.parent
DM_USE_CASES = ["Structured_Fodors-Zagats", "Structured_DBLP-GoogleScholar",
"Structured_DBLP-ACM", "Structured_Amazon-Google",
"Structured_Walmart-Amazon", "Structured_Beer",
"Structured_iTunes-Amazon", "Textual_Abt-Buy",
"Dirty_iTunes-Amazon", "Dirty_DBLP-ACM",
"Dirty_DBLP-GoogleScholar", "Dirty_Walmart-Amazon"]
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
266,
1136,
198,
11748,
3108,
8019,
198,
6738,
3108,
8019,
1330,
10644,
628,
198,
31190,
23680,
62,
34720,
796,
10644,
7,
834,
7753,
834,
737,
8000,
13,
8000,
198,
23127,
... | 2.112903 | 248 |
'''
https://leetcode.com/problems/longest-nice-substring/
A string s is nice if, for every letter of the alphabet that s contains, it appears both in uppercase and lowercase. For example, "abABB" is nice because 'A' and 'a' appear, and 'B' and 'b' appear. However, "abA" is not because 'b' appears, but 'B' does not.
Given a string s, return the longest substring of s that is nice. If there are multiple, return the substring of the earliest occurrence. If there are none, return an empty string.
Example 1:
Input: s = "YazaAay"
Output: "aAa"
Explanation: "aAa" is a nice string because 'A/a' is the only letter of the alphabet in s, and both 'A' and 'a' appear.
"aAa" is the longest nice substring.
Example 2:
Input: s = "Bb"
Output: "Bb"
Explanation: "Bb" is a nice string because both 'B' and 'b' appear. The whole string is a substring.
Example 3:
Input: s = "c"
Output: ""
Explanation: There are no nice substrings.
Example 4:
Input: s = "dDzeE"
Output: "dD"
Explanation: Both "dD" and "eE" are the longest nice substrings.
As there are multiple longest nice substrings, return "dD" since it occurs earlier.
Constraints:
1 <= s.length <= 100
s consists of uppercase and lowercase English letters.
''' | [
7061,
6,
198,
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
6511,
395,
12,
44460,
12,
7266,
8841,
14,
198,
198,
32,
4731,
264,
318,
3621,
611,
11,
329,
790,
3850,
286,
262,
24830,
326,
264,
4909,
11,
340,
3568,
1111,
... | 3.022333 | 403 |
#!python
def contains(text, pattern):
"""Return a boolean indicating whether pattern occurs in text."""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
# TODO: Implement contains here (iteratively and/or recursively)
index = find_index(text, pattern)
if index != None:
return True
return False
def find_index(text, pattern):
"""Return the starting index of the first occurrence of pattern in text,
or None if not found."""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
# TODO: Implement find_index here (iteratively and/or recursively)
window = len(pattern)
if len(pattern) == 0:
return 0
else:
index = 0
# change the wile loop to for loop bc we know the number of iterations
# greater or equals to catch the patter if it's last index
while index <= len(text) - 1:
# running time is "n" iterations => O(n*m) is total runnning time
if pattern == text[index : window + index]:
# C++ way checking the index is faster and save up the memory and copying the string slice
# this is going to be O(m) if the pattern is big like paragraph
# and uses more memory O(m)
return index
index += 1
return None
def find_all_indexes(text, pattern):
"""Return a list of starting indexes of all occurrences of pattern in text,
or an empty list if not found."""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
# instead of starting at 0, I can start where i found patter and start at the index + 1
index = 0
window = len(pattern)
indexes = []
if pattern == '':
# for empty pattern creates list of indecies of the text
return list(range(len(text)))
else:
# greater or equals to catch the patter if it's last index
while index <= len(text) - 1:
if pattern == text[index:window + index]:
indexes.append(index)
index += 1
return indexes
def main():
"""Read command-line arguments and test string searching algorithms."""
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) == 2:
text = args[0]
pattern = args[1]
test_string_algorithms(text, pattern)
else:
script = sys.argv[0]
print('Usage: {} text pattern'.format(script))
print('Searches for occurrences of pattern in text')
print("\nExample: {} 'abra cadabra' 'abra'".format(script))
print("contains('abra cadabra', 'abra') => True")
print("find_index('abra cadabra', 'abra') => 0")
print("find_all_indexes('abra cadabra', 'abra') => [0, 8]")
if __name__ == '__main__':
# main()
# indexes1 = find_all_indexes('abcabcabc', 'abc')
# print("result => [0, 3, 6]: ", indexes1)
indexes2 = find_all_indexes('abcabcdabcde', 'abcd')
print("indexes2 => [3, 7]: ", indexes2)
| [
2,
0,
29412,
628,
198,
4299,
4909,
7,
5239,
11,
3912,
2599,
198,
220,
220,
220,
37227,
13615,
257,
25131,
12739,
1771,
3912,
8833,
287,
2420,
526,
15931,
198,
220,
220,
220,
6818,
318,
39098,
7,
5239,
11,
965,
828,
705,
5239,
318,
... | 2.62 | 1,250 |
import StringIO
###
def CompileToAST(init_rule, stmt, cmodule):
'''
Used by AST node during transformation to create new AST nodes
'''
infile = StringIO.StringIO(stmt)
import striga.compiler.scanner as scanner
tokens = scanner.Scan(infile.readline)
import striga.compiler.astbuilder as astbuilder
parser = astbuilder.ASTBuilder(start = init_rule)
ast = parser.parse(tokens)
ast.Transform(cmodule)
return ast | [
11748,
10903,
9399,
201,
198,
201,
198,
21017,
201,
198,
201,
198,
4299,
3082,
576,
2514,
11262,
7,
15003,
62,
25135,
11,
336,
16762,
11,
269,
21412,
2599,
201,
198,
197,
7061,
6,
201,
198,
38052,
416,
29273,
10139,
1141,
13389,
284,
... | 2.658683 | 167 |
from ..betterbot import Member
name = 'avatar'
| [
6738,
11485,
27903,
13645,
1330,
10239,
198,
198,
3672,
796,
705,
615,
9459,
6,
628
] | 3.266667 | 15 |
# Author: Barbaros Cetiner
import os
import cv2
from lib.infer_detector import Infer
import torch
import time
from tqdm import tqdm
import argparse
import csv
import warnings
# Ignore warning messages:
warnings.filterwarnings("ignore")
if __name__ == '__main__':
opt = get_args()
infer(opt) | [
2,
6434,
25,
45952,
418,
327,
316,
7274,
198,
198,
11748,
28686,
198,
11748,
269,
85,
17,
198,
6738,
9195,
13,
259,
2232,
62,
15255,
9250,
1330,
554,
2232,
198,
11748,
28034,
198,
11748,
640,
198,
6738,
256,
80,
36020,
1330,
256,
80... | 2.809091 | 110 |
# -*- coding: utf-8 -*-
'''
Created on Sep 24, 2013
@author: jin
'''
from django.db import models
import re
from django.core import validators
from django.contrib.auth.models import User
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
41972,
319,
8621,
1987,
11,
2211,
198,
198,
31,
9800,
25,
474,
259,
198,
7061,
6,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
11748,
302,
198,
... | 2.613333 | 75 |
from sofi.ui import Small
| [
6738,
523,
12463,
13,
9019,
1330,
10452,
198
] | 3.25 | 8 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.Qt import *
if __name__ == '__main__':
app = QApplication([])
mw = MainWindow()
mw.show()
app.exec()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
541,
21879,
1077,
6,
628,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
1330,
1635,
... | 2.134021 | 97 |
#
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
import gettext
from otopi import util
from otopi import plugin
from ovirt_engine_setup.dwh import constants as odwhcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import dwh_history_timekeeping as \
engine_db_timekeeping
@util.export
# vim: expandtab tabstop=4 shiftwidth=4
| [
2,
198,
2,
19643,
2265,
12,
18392,
12,
40406,
1377,
19643,
2265,
3113,
9058,
198,
2,
198,
2,
15069,
267,
53,
2265,
46665,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
2,
628,
198,
11748,... | 3.059603 | 151 |
#encoding:utf-8
import importlib
import time
from datetime import datetime
import random
import logging
from utils import SupplyResult
from utils.get_all_admins import get_admins_list
from utils.tech import get_dev_channel, get_all_submodules, get_last_members_cnt
subreddit = 'all'
t_channel = get_dev_channel()
| [
2,
12685,
7656,
25,
40477,
12,
23,
198,
198,
11748,
1330,
8019,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
4738,
198,
11748,
18931,
198,
198,
6738,
3384,
4487,
1330,
22663,
23004,
198,
6738,
3384,
4487,
13,
... | 3.117647 | 102 |
from contextlib import contextmanager
import os
import tempfile
@contextmanager
def safewrite(path, mode='w'):
"""
Open a temporary file and replace it with `path` upon close.
Examples
--------
.. Run the code below in a clean temporary directory:
>>> getfixture('cleancwd')
>>> with open('data.txt', 'w') as f:
... _ = f.write('original content')
>>> with safewrite('data.txt') as f:
... _ = f.write(str(1 / 0)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ZeroDivisionError: ...
>>> with open('data.txt') as f:
... print(f.read())
original content
If it were a normal `open`, then the original content would be
wiped out.
>>> with open('data.txt', 'w') as f:
... _ = f.write(str(1 / 0)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ZeroDivisionError: ...
>>> with open('data.txt') as f:
... print(f.read())
<BLANKLINE>
"""
abspath = os.path.abspath(path)
base = os.path.basename(abspath)
dir = os.path.dirname(abspath)
try:
with tempfile.NamedTemporaryFile(mode=mode, prefix=base, dir=dir,
delete=False) as tmpf:
yield tmpf
os.rename(tmpf.name, abspath)
finally:
if os.path.exists(tmpf.name):
os.unlink(tmpf.name)
| [
6738,
4732,
8019,
1330,
4732,
37153,
198,
11748,
28686,
198,
11748,
20218,
7753,
628,
198,
31,
22866,
37153,
198,
4299,
1932,
413,
6525,
7,
6978,
11,
4235,
11639,
86,
6,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4946,
257,
... | 2.211982 | 651 |
#!/usr/bin/env python3
import os
import sys
import argparse
import json
from random import randint
MAXINT = sys.maxsize
MININT = -sys.maxsize
ROW_SIZE_SAMPLE_POINTS = 1024
if __name__ == "__main__":
main()
"""
wbs_dir=/ufs/bogdan/work/master-project/public_bi_benchmark-master_project/benchmark
max_sample_size=$((1024*1024*10))
dataset_nb_rows=20
./main.py --dataset-nb-rows $dataset_nb_rows --max-sample-size $max_sample_size --sample-block-nb-rows 2 --output-file ./output/output.csv $wbs_dir/Arade/samples/Arade_1.sample.csv
================================================================================
wbs_dir=/scratch/bogdan/tableau-public-bench/data/PublicBIbenchmark-test
max_sample_size=$((1024*1024*10))
dataset_nb_rows=9888775
./main.py --dataset-nb-rows $dataset_nb_rows --max-sample-size $max_sample_size --sample-block-nb-rows 32 --output-file ./output/output.csv $wbs_dir/Arade/Arade_1.csv
dataset_nb_rows=9624351
./main.py --dataset-nb-rows $dataset_nb_rows --max-sample-size $max_sample_size --sample-block-nb-rows 32 --output-file ./output/output.csv $wbs_dir/NYC/NYC_1.csv
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
6738,
4738,
1330,
43720,
600,
628,
198,
22921,
12394,
796,
25064,
13,
9806,
7857,
198,
23678,
1... | 2.640662 | 423 |
version = '1.6.0'
git_version = 'b0d483c1266e6822c730d35b39bff3d9b92c6648'
| [
198,
9641,
796,
705,
16,
13,
21,
13,
15,
6,
198,
18300,
62,
9641,
796,
705,
65,
15,
67,
38783,
66,
1065,
2791,
68,
3104,
1828,
66,
43916,
67,
2327,
65,
2670,
65,
487,
18,
67,
24,
65,
5892,
66,
21,
34287,
6,
198
] | 1.727273 | 44 |
""" All Measurements are either explicit or implicit """
""" Specific Measurement Enumerations """
########## GPS ##########
########## Azimuth ##########
########## Elevation ##########
########## Range ##########
########## Linear Relation ##########
########## Velocity ##########
########## Debug GPS of Neighbors ##########
# These measurements are impossible in reality but useful in debugging
# They represent "src took a gps measurement of neighbor" (e.g. I went to my neighbor's exact location and took a gps measurement for them)
""" Adding a new measurement Steps
Add it above & its implicit counterpart
Is it an angle? Add self.is_angle_meas = True to its constructor
Add its jacobian to get_measurement_jacobian() in measurement_jacobians.py
If it has a nonlinear measurement function, add it to get_nonlinear_expected_meas() in measurement_expected.py
Add its implicit conversion to asset.py
""" | [
198,
37811,
1439,
24291,
902,
389,
2035,
7952,
393,
16992,
37227,
198,
198,
37811,
17377,
24291,
434,
2039,
6975,
602,
37227,
198,
198,
7804,
2235,
15472,
1303,
7804,
2,
198,
198,
7804,
2235,
7578,
320,
1071,
1303,
7804,
2,
198,
198,
... | 3.881857 | 237 |
import argparse
from collections import namedtuple
import glob
import numpy as np
import os
import re
import struct
import json
import pandas as pd
from collections import OrderedDict
from tqdm.auto import tqdm
class Tags(object):
"""
Represents tag name to id mapping from a header file.
Preprocessor macros are held in the structure:
tag_names: Dict<str, int>
We also reverse index for faster subsequent parsing:
tag_ids: Dict<int, str>
"""
class Log(object):
"""
Represents a single log file.
log: List<Tuple<int, int>>
Also creates a human-readable version with tag names instead of IDs.
This could hit performance issues for large log files.
Preferably use IDs instead.
log_h: List<Tuple<string, int>>
"""
def make_human_readable(self):
''' Substitute tags for tag IDs to create a human-readable log '''
assert all([tag_id in self.tags.tag_ids for (tag_id, value) in self.log])
self.log_h = [(self.tags.tag_ids[tag_id], value) for (tag_id, value) in self.log]
# for tag, value in self.log_h:
# print(f'{tag}: {value}')
def check_version(self):
"""
Assert logged version matches version from header file
"""
try:
self.version = next(value for (tag, value) in self.log_h if tag == 'version')
except StopIteration:
raise ValueError('no version tag found')
if self.version is not self.tags.tag_names['version']:
raise ValueError(f"expected version {self.tags.tag_names['version']}, got {self.version}")
def get_wall_clock_durations(self, name: str):
''' Automatically appends the wc_ prefix and _begin or _end suffixes'''
begin_full_name = f'wc_{name}_begin'
end_full_name = f'wc_{name}_end'
begin_tag_id = self.tags.tag_names[begin_full_name]
end_tag_id = self.tags.tag_names[end_full_name]
begins = [value for (tag_id, value) in self.log if tag_id == begin_tag_id]
ends = [value for (tag_id, value) in self.log if tag_id == end_tag_id]
assert len(begins) == len(ends)
# Some durations may wrap around, but check whether all fit in half-range
diffs = [(ends[i] - begins[i]) % (1 << 32) for i in range(len(begins))]
assert all([d < (1 << 31) for d in diffs])
durations = [float(d) / 1e6 * 1e3 for d in diffs]
return durations
def get_cpu_clock_durations(self, name: str):
''' Automatically appends the cc_ prefix and _begin or _end suffixes'''
begin_full_name = f'cc_{name}_begin'
end_full_name = f'cc_{name}_end'
begin_tag_id = self.tags.tag_names[begin_full_name]
end_tag_id = self.tags.tag_names[end_full_name]
begins = [value for (tag_id, value) in self.log if tag_id == begin_tag_id]
ends = [value for (tag_id, value) in self.log if tag_id == end_tag_id]
assert len(begins) == len(ends)
# Some durations may wrap around, but check whether all fit in half-range
diffs = [(ends[i] - begins[i]) % (1 << 32) for i in range(len(begins))]
assert all([d < (1 << 31) for d in diffs])
durations = [float(d) / self.clocks_per_sec * 1e3 for d in diffs]
return durations
class Epochs(object):
"""
Represents epochs across one or more logs.
"""
# class Epoch(object):
# self.begin = None
# self.end = None
# self.best_fitness = None
# Old depreviated verison
# Given a log_dir (generated with a leonhard run) and a name, saves a dataframe to name.gz
# That dataframe contains the epochs, wall clock times, fitness, rep, rank and all the variable parameters
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process GA logs')
parser.add_argument('--tags', help='path to C header file with log defines')
parser.add_argument('--log', help='path to log file')
parser.add_argument('--dir', help='path to directory with log files')
args = parser.parse_args()
# Parse header file with tag definitions
tags_fn = os.path.join('logging', 'tags.hpp') if not args.tags else args.tags
tags = Tags(tags_fn)
# print(tags)
# parse binary log
log_fns = (
[args.log] if args.log else
logs_in_dir(args.dir) if args.dir else
[last_log()]
)
logs = [Log(log_fn, tags) for log_fn in log_fns]
epochss = [Epochs(log, tags) for log in logs]
from matplotlib import pyplot as plt
for epochs in epochss:
fitness, time = epochs.get_fitness_vs_time()
plt.plot([t/1e3 for t in time], fitness)
plt.xlabel('time [s]')
plt.ylabel('distance')
plt.show()
for log in logs:
print(log.fn)
print(' total:',
f"wall clock {log.get_wall_clock_durations('logging')[0]:.3f}ms",
f"CPU clock {log.get_cpu_clock_durations('logging')[0]:.3f}ms"
)
print_stats('epoch')
print_stats('rank_individuals')
print_stats('breed_population')
print_stats('mutate_population')
| [
11748,
1822,
29572,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
11748,
15095,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
2878,
198,
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
... | 2.424528 | 2,120 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
"""
Django local settings for the NhbApps project.
This file is included from settings.py and contains specific
settings that can be changed as part of a deployment, without
having to edit the settings.py file.
"""
# the secret below ensures an adversary cannot fake aspects like a session-id
# just make sure it is unique per installation and keep it private
# details: https://docs.djangoproject.com/en/2.2/ref/settings/#secret-key
SECRET_KEY = '1234-replace-with-your-own-secret-key-56789abcdefg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ENABLE_DEBUG_TOOLBAR = False
SITE_URL = "https://yourdomain.com" # used by Overige:tijdelijke urls and SAML2
ALLOWED_HOSTS = [
'127.0.0.1'
]
IS_TEST_SERVER = False
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'database-name',
'USER': 'database-user',
'PASSWORD': 'database-pwd',
'HOST': 'localhost',
'PORT': '5432'
}
}
# the issuer name that is sent to the OTP application in the QR code
OTP_ISSUER_NAME = "yourdomain.com"
NAAM_SITE = "YourSite (dev)"
EMAIL_BONDSBURO = "info@handboogsport.nl"
# sending email
#POSTMARK_URL = 'https://api.postmarkapp.com/email'
#POSTMARK_API_KEY = 'postmark private api key'
#EMAIL_FROM_ADDRESS = 'noreply@yourdomain.com' # zie ook https://nl.wikipedia.org/wiki/Noreply
EMAIL_DEVELOPER_TO = 'developer@yourdomain.com'
EMAIL_DEVELOPER_SUBJ = 'Internal Server Error: ' + NAAM_SITE
# users allowed to send to in this test setup
# if empty, allows sending to anybody
EMAIL_ADDRESS_WHITELIST = ()
# url van het document privacyverklaring
PRIVACYVERKLARING_URL = 'url to privacy statement html, pdf or googledoc, etc'
# url van het document met voorwaarden voor A-status wedstrijden
VOORWAARDEN_A_STATUS_URL = 'https://docs.google.com/document/d/random google document number/view'
# google doc id van het gsheet document
RECORDS_GSHEET_FILE_ID = 'random google document number'
# door de naam van een sheet te gebruiken as 'Range' krijg je alle cellen uit de sheet
RECORDS_GSHEET_SHEET_NAMES = [
'Data individueel outdoor',
'Data individueel indoor',
'Data individueel 25m1pijl',
'Data team'
]
# use static manual pages (wiki is for the test server only)
ENABLE_WIKI = False
# ondersteuning van de Wiki met SSO via de IdP, of ingebouwde handleiding?
WIKI_URL = 'http://wiki.yourdomain.com'
# vertaling van tijdelijke (99xxxx nummers) naar correcte NHB nummer
MAP_99_NRS = {
990001: 1234567,
}
# end of file
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
220,
15069,
357,
66,
8,
13130,
12,
1238,
2481,
7431,
261,
5719,
4587,
7178,
7750,
13,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
220,
49962,
739,
347,
10305... | 2.633737 | 1,073 |
"""Universal configurator.
Allows the custom project to have configuration written in TOML or
JSON file and easily be converted to the class with same hierarchy
of values.
"""
import json
import pytomlpp
import libnacl.secret as crypter
from pathlib import Path
from libnacl.utils import load_key
from typing import Union, Any, Tuple, Callable, Iterator, Iterable, Type
from functools import update_wrapper
from inspect import getmembers, isroutine
DATA_PATH = Path.home().joinpath(".everynet/")
DEFAULT_SECRET_FILE = DATA_PATH.joinpath("secret.key")
DEFAULT_CFG_FILE = DATA_PATH.joinpath("config")
RESERVED = ["name"]
class MapEncoder(json.JSONEncoder):
"""json.JSONEncoder extender.
Recursively scan object replacing classes with dictionary of class
attributes.
"""
def process_cls(self, obj):
"""Convert classes in object into dictionary of class attributes.
Recursive lookup.
"""
if hasattr(obj, "__dict__"):
attrs = [
a
for a in getmembers(obj, lambda a: not (isroutine(a)))
if not (a[0].startswith("_") and a[0].endswith("_"))
]
rv = {}
for k, v in attrs:
if hasattr(v, "__dict__"):
rv[k] = self.process_cls(v)
else:
rv[k] = v
return rv
else:
return str(obj)
class BaseConfig(object):
"""Type container. Parent class for config."""
def update(
self, data: dict = None, processor_cb: Callable[[str, Any], Any] = None, **kwargs: Any
) -> None:
"""Set config instance attributes.
Must be used instead of set_attr or setattr, because it also
stores raw data for save - restore.
Parameters:
data [dict] Dictionary of attributes {name: value} to be set. Optional,
`name=value` could be set as keyword arguments.
Defaults to None.
processor_cb [Callable] value processor callback
"""
data = dict(data, **kwargs) if data else dict(**kwargs)
self.set_attr(data, processor_cb)
@staticmethod
def convert_path(key: str, val: Any) -> Any:
"""Resolve relative path and exapand user home sign `~`.
Return:
str Posix style path
"""
return Path(val).expanduser().resolve().as_posix() if "path" in key else val
@staticmethod
def path_resolve(_d: dict) -> dict:
"""Recursively convert pathes in dict
Return:
dict With converted paths
"""
rv = {}
for k, v in _d.items():
if isinstance(v, dict):
v = BaseConfig.path_resolve(v)
else:
v = BaseConfig.convert_path(k, v)
rv.update({k: v})
return rv
def set_attr(self, data: dict = {}, processor_cb: Callable[[str, Any], Any] = None) -> None:
"""Set instance attributes.
Resolve paths and call `processor_cb` callback function if provided.
Args:
data[dict] dictionary of attribute's {name: value}
processor_cb[Callable] Value processor callback. It's return value will
be set if method provided.
"""
for k, v in data.items():
val = BaseConfig.convert_path(k, v)
setattr(self, k, processor_cb(k, val) if processor_cb else v)
def add_cls_attr(self, name: str, attrs: dict = {}) -> None:
"""Create new type and add it's instance as attribute.
Args:
name [str] Attribute name and class name (capitilized).
attrs [dict] New class atrributes.
"""
cls_attr = type(name.capitalize(), (BaseConfig,), attrs)
self.update({name: cls_attr()})
def configurable(
f,
load_path: Union[str, Path] = None,
save_path: Union[str, Path] = None,
key_path: Union[str, Path] = None,
) -> Callable[[Config, Any], Any]:
"""
Decorator to transorm function to receive existed Config() object.
Arguments are the same as for Config class.
"""
cli_config = Config(load_path=load_path, save_path=save_path, key_path=key_path)
return update_wrapper(new_func, f)
| [
37811,
38747,
4566,
333,
1352,
13,
220,
198,
198,
34934,
262,
2183,
1628,
284,
423,
8398,
3194,
287,
41526,
43,
393,
198,
40386,
2393,
290,
3538,
307,
11513,
284,
262,
1398,
351,
976,
18911,
198,
1659,
3815,
13,
198,
37811,
198,
11748... | 2.337719 | 1,824 |
from typing import List, Tuple
if __name__ == "__main__":
# print(mas([3, 7, 4, 6, 5, 9]))
assert max_array_sum([3, 7, 4, 6, 5]) == 13
assert max_array_sum([2, 1, 5, 8, 4]) == 11
| [
6738,
19720,
1330,
7343,
11,
309,
29291,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1303,
3601,
7,
5356,
26933,
18,
11,
767,
11,
604,
11,
718,
11,
642,
11,
860,
60,
4008,
198,
220,
... | 2.191011 | 89 |
# palindrome index checker
a = 'hgygsvlfcwnswtuhmyaljkqlqjjqlqkjlaymhutwsnwcwflvsgygh'
c = a[::-1]
print c
l = len(a) - 1
print l
for i in range(0, len(a) / 2):
if a == a[::-1]:
print -1
elif a[i] != a[l - i]:
if (a[i + 1] == a[l - i]) and (a[i + 2] == a[l - i - 1]):
print i
i += 1
elif (a[i] == a[l - i - 1]) and (a[i + 2] == a[l - i - 2]):
print l - i
i += 1
"""
a = 0
while a < 6:
for i in range(9):
if i == 3:
break
print "3 is encountered"
a += 1
print "loop ended!"
"""
| [
2,
6340,
521,
5998,
6376,
2198,
263,
201,
198,
201,
198,
201,
198,
64,
796,
705,
71,
1360,
14542,
85,
1652,
66,
675,
2032,
83,
7456,
1820,
282,
73,
74,
13976,
80,
41098,
13976,
80,
42421,
10724,
76,
71,
315,
86,
16184,
86,
66,
8... | 1.635659 | 387 |
from random import randint, Random
import uuid
import re
import os
from pyArango.connection import Connection
from pyArango.theExceptions import CreationError
from stix2 import parse
from stix2arango.exceptions import MergeFailedException
SPECIAL_CHARS = '[()]=<>'
STRING_CHARS = '"\''
SEPARATOR_CHARS = ' \t'
def update_id_for_sdo(sdo):
"""Update sdo id with a reproducible uuid base on fields
Args:
sdo (sdo): Stix sdo object
Returns:
sdo: updated sdo stix object
"""
if sdo.type == 'relationship':
raise TypeError('object should not be a relationship')
sdo = dict(sdo)
exclude_field = ['created', 'modified', 'spec_version', 'id']
seed = {k:v for k,v in sdo.items() \
if 'ref' not in k and k[:2] != 'x_' and k not in exclude_field}
rd = Random()
rd.seed(str(seed))
_id = uuid.UUID(int=rd.getrandbits(128), version=4)
sdo['id'] = sdo['type'] + "--" + str(_id)
return parse(sdo, allow_custom=True)
def update_uid_for_obj_list(l_obj):
"""Replace sdo id by deterministic id and replace id in relations and references
Args:
l_obj (list) : list of stix objects
Returns:
list: list of updated sdo stix object
"""
id_transform = {}
updated_l_obj = []
for sdo in l_obj:
if sdo.type != 'relationship':
old_id = sdo.id
sdo = update_id_for_sdo(sdo)
new_id = sdo.id
id_transform[old_id] = new_id
updated_l_obj.append(dict(sdo))
for obj in updated_l_obj:
for key, value in obj.items():
if key.endswith('ref'):
if value in id_transform:
obj[key] = id_transform[value]
if key.endswith('refs'):
obj[key] = [ id_transform[v] if v in id_transform else v for v in obj[key] ]
return [parse(obj) for obj in updated_l_obj]
import collections
def deep_dict_update(source, overrides):
"""
Update a nested dictionary or similar mapping.
Modify ``source`` in place.
"""
for key, value in overrides.items():
if isinstance(value, collections.Mapping) and value:
returned = deep_dict_update(source.get(key, {}), value)
source[key] = returned
else:
source[key] = overrides[key]
return source | [
6738,
4738,
1330,
43720,
600,
11,
14534,
198,
11748,
334,
27112,
198,
11748,
302,
198,
11748,
28686,
198,
198,
6738,
12972,
3163,
14208,
13,
38659,
1330,
26923,
198,
6738,
12972,
3163,
14208,
13,
1169,
3109,
11755,
1330,
21582,
12331,
198... | 2.285992 | 1,028 |
#
# PySNMP MIB module ZYXEL-MLD-SNOOPING-PROXY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZYXEL-MLD-SNOOPING-PROXY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:44:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint")
dot1dBasePort, = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBasePort")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
PortList, = mibBuilder.importSymbols("Q-BRIDGE-MIB", "PortList")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, NotificationType, Gauge32, Counter64, ObjectIdentity, Unsigned32, IpAddress, Counter32, TimeTicks, MibIdentifier, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "NotificationType", "Gauge32", "Counter64", "ObjectIdentity", "Unsigned32", "IpAddress", "Counter32", "TimeTicks", "MibIdentifier", "ModuleIdentity")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
esMgmt, = mibBuilder.importSymbols("ZYXEL-ES-SMI", "esMgmt")
# Root of the ZyXEL MLD snooping proxy MIB subtree: enterprises.890.1.15.3.51
# (.1.3.6.1.4.1.890 is the ZyXEL enterprise arc). The four MibIdentifier
# nodes below partition the subtree into its functional branches.
zyxelMldSnoopingProxy = ModuleIdentity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51))
if mibBuilder.loadTexts: zyxelMldSnoopingProxy.setLastUpdated('201207010000Z')
if mibBuilder.loadTexts: zyxelMldSnoopingProxy.setOrganization('Enterprise Solution ZyXEL')
# Branch .1: multicast-group filtering profiles and per-port filtering setup.
zyxelMldSnoopingProxyFilteringSetup = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1))
# Branch .2: MLD v1/v2 packet counters at system, VLAN and port scope.
zyxelMldSnoopingProxyStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2))
# Branch .3: proxy configuration (state, VLANs, upstream/downstream ports).
zyxelMldSnoopingProxySetup = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3))
# Branch .4: read-only view of learned multicast group memberships.
zyxelMldSnoopingProxyMembershipStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 4))
# --- Filtering setup (branch .1) ---------------------------------------
# Read-only capacity: how many filtering profiles the device supports.
zyMldSnoopingProxyFilteringMaxNumberOfProfiles = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringMaxNumberOfProfiles.setStatus('current')
# Filtering profile table (.1.2): each row is one IPv6 multicast address
# range, keyed by profile name plus the start/end address (and their
# InetAddressType discriminators). Rows are created/destroyed via the
# RowStatus column.
zyxelMldSnoopingProxyFilteringProfileTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 2), )
if mibBuilder.loadTexts: zyxelMldSnoopingProxyFilteringProfileTable.setStatus('current')
zyxelMldSnoopingProxyFilteringProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 2, 1), ).setIndexNames((0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyFilteringProfileName"), (0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyFilteringProfileStartIpAddressType"), (0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyFilteringProfileStartIpAddress"), (0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyFilteringProfileEndIpAddressType"), (0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyFilteringProfileEndIpAddress"))
if mibBuilder.loadTexts: zyxelMldSnoopingProxyFilteringProfileEntry.setStatus('current')
# Index columns (no MAX-ACCESS set — they are not readable directly).
zyMldSnoopingProxyFilteringProfileName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 2, 1, 1), OctetString())
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringProfileName.setStatus('current')
zyMldSnoopingProxyFilteringProfileStartIpAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 2, 1, 2), InetAddressType())
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringProfileStartIpAddressType.setStatus('current')
zyMldSnoopingProxyFilteringProfileStartIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 2, 1, 3), InetAddress())
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringProfileStartIpAddress.setStatus('current')
zyMldSnoopingProxyFilteringProfileEndIpAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 2, 1, 4), InetAddressType())
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringProfileEndIpAddressType.setStatus('current')
zyMldSnoopingProxyFilteringProfileEndIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 2, 1, 5), InetAddress())
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringProfileEndIpAddress.setStatus('current')
# Row creation/deletion handle for the profile table.
zyMldSnoopingProxyFilteringProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 2, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringProfileRowStatus.setStatus('current')
# Per-port filtering table (.1.3), indexed by the standard bridge port
# (dot1dBasePort): assigns a profile to a port and optionally caps the
# number of multicast groups the port may join.
zyxelMldSnoopingProxyFilteringPortTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 3), )
if mibBuilder.loadTexts: zyxelMldSnoopingProxyFilteringPortTable.setStatus('current')
zyxelMldSnoopingProxyFilteringPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 3, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: zyxelMldSnoopingProxyFilteringPortEntry.setStatus('current')
zyMldSnoopingProxyFilteringPortProfile = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 3, 1, 1), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringPortProfile.setStatus('current')
zyMldSnoopingProxyFilteringPortGroupLimitState = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 3, 1, 2), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringPortGroupLimitState.setStatus('current')
zyMldSnoopingProxyFilteringPortMaxNumberOfGroups = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 1, 3, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringPortMaxNumberOfGroups.setStatus('current')
# --- System-wide statistics (branch .2, scalars .1-.15) ----------------
# Read-only counters for MLD messages handled by the proxy, broken down
# by protocol version (V1: Query/Report/Done; V2: Query/Report) and by
# disposition (Rx = received, Tx = transmitted, Drop = discarded).
zyMldSnoopingProxySysStatisticsV1QueryRx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV1QueryRx.setStatus('current')
zyMldSnoopingProxySysStatisticsV1QueryTx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV1QueryTx.setStatus('current')
zyMldSnoopingProxySysStatisticsV1QueryDrop = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV1QueryDrop.setStatus('current')
zyMldSnoopingProxySysStatisticsV1ReportRx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV1ReportRx.setStatus('current')
zyMldSnoopingProxySysStatisticsV1ReportTx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV1ReportTx.setStatus('current')
zyMldSnoopingProxySysStatisticsV1ReportDrop = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV1ReportDrop.setStatus('current')
zyMldSnoopingProxySysStatisticsV1DoneRx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV1DoneRx.setStatus('current')
zyMldSnoopingProxySysStatisticsV1DoneTx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV1DoneTx.setStatus('current')
zyMldSnoopingProxySysStatisticsV1DoneDrop = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV1DoneDrop.setStatus('current')
# MLDv2 counters (no Done message in v2; Reports carry leave state).
zyMldSnoopingProxySysStatisticsV2QueryRx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV2QueryRx.setStatus('current')
zyMldSnoopingProxySysStatisticsV2QueryTx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV2QueryTx.setStatus('current')
zyMldSnoopingProxySysStatisticsV2QueryDrop = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV2QueryDrop.setStatus('current')
zyMldSnoopingProxySysStatisticsV2ReportRx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV2ReportRx.setStatus('current')
zyMldSnoopingProxySysStatisticsV2ReportTx = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV2ReportTx.setStatus('current')
zyMldSnoopingProxySysStatisticsV2ReportDrop = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxySysStatisticsV2ReportDrop.setStatus('current')
# --- Per-VLAN statistics table (branch .2.16) --------------------------
# Same counter layout as the system scalars above, but one row per VLAN,
# indexed by zyMldSnoopingProxyVlanVid (defined in the setup branch).
# All columns are read-only Integer32 counters.
zyxelMldSnoopingProxyStatisticsVlanTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16), )
if mibBuilder.loadTexts: zyxelMldSnoopingProxyStatisticsVlanTable.setStatus('current')
zyxelMldSnoopingProxyStatisticsVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1), ).setIndexNames((0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyVlanVid"))
if mibBuilder.loadTexts: zyxelMldSnoopingProxyStatisticsVlanEntry.setStatus('current')
# MLDv1 counters: Query/Report/Done, each with Rx/Tx/Drop (columns 1-9).
zyMldSnoopingProxyStatisticsVlanV1QueryRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV1QueryRx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV1QueryTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV1QueryTx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV1QueryDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV1QueryDrop.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV1ReportRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV1ReportRx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV1ReportTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV1ReportTx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV1ReportDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV1ReportDrop.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV1DoneRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV1DoneRx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV1DoneTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV1DoneTx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV1DoneDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV1DoneDrop.setStatus('current')
# MLDv2 counters: Query/Report, each with Rx/Tx/Drop (columns 10-15).
zyMldSnoopingProxyStatisticsVlanV2QueryRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV2QueryRx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV2QueryTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV2QueryTx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV2QueryDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV2QueryDrop.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV2ReportRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV2ReportRx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV2ReportTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV2ReportTx.setStatus('current')
zyMldSnoopingProxyStatisticsVlanV2ReportDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 16, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsVlanV2ReportDrop.setStatus('current')
# --- Per-port statistics table (branch .2.17) --------------------------
# Same counter layout again, one row per bridge port, indexed by the
# standard dot1dBasePort from BRIDGE-MIB. All columns read-only.
zyxelMldSnoopingProxyStatisticsPortTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17), )
if mibBuilder.loadTexts: zyxelMldSnoopingProxyStatisticsPortTable.setStatus('current')
zyxelMldSnoopingProxyStatisticsPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: zyxelMldSnoopingProxyStatisticsPortEntry.setStatus('current')
# MLDv1 counters: Query/Report/Done, each with Rx/Tx/Drop (columns 1-9).
zyMldSnoopingProxyStatisticsPortV1QueryRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV1QueryRx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV1QueryTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV1QueryTx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV1QueryDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV1QueryDrop.setStatus('current')
zyMldSnoopingProxyStatisticsPortV1ReportRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV1ReportRx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV1ReportTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV1ReportTx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV1ReportDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV1ReportDrop.setStatus('current')
zyMldSnoopingProxyStatisticsPortV1DoneRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV1DoneRx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV1DoneTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV1DoneTx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV1DoneDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV1DoneDrop.setStatus('current')
# MLDv2 counters: Query/Report, each with Rx/Tx/Drop (columns 10-15).
zyMldSnoopingProxyStatisticsPortV2QueryRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV2QueryRx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV2QueryTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV2QueryTx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV2QueryDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV2QueryDrop.setStatus('current')
zyMldSnoopingProxyStatisticsPortV2ReportRx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV2ReportRx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV2ReportTx = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV2ReportTx.setStatus('current')
zyMldSnoopingProxyStatisticsPortV2ReportDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 17, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsPortV2ReportDrop.setStatus('current')
# --- Counter-clear controls (branch .2.18-.2.21) -----------------------
# Writable EnabledStatus scalars used to reset statistics: one global
# clear plus scoped clears for the system, port and VLAN counter sets.
zyMldSnoopingProxyStatisticsClear = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 18), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsClear.setStatus('current')
zyMldSnoopingProxyStatisticsClearSystem = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 19), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsClearSystem.setStatus('current')
zyMldSnoopingProxyStatisticsClearPort = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 20), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsClearPort.setStatus('current')
zyMldSnoopingProxyStatisticsClearVlan = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 2, 21), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyStatisticsClearVlan.setStatus('current')
# --- Proxy setup (branch .3) -------------------------------------------
# Global on/off switches and the 802.1p priority tag applied by the proxy.
zyMldSnoopingProxyState = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyState.setStatus('current')
zyMldSnoopingProxyFilteringState = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 2), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyFilteringState.setStatus('current')
zyMldSnoopingProxy8021pPriority = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxy8021pPriority.setStatus('current')
zyMldSnoopingProxyMaxNumberOfVlans = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyMaxNumberOfVlans.setStatus('current')
# VLAN table (.3.5): declares which VLANs the proxy operates on, keyed by
# zyMldSnoopingProxyVlanVid. This index is reused by the statistics and
# upstream/downstream tables below. Rows managed via RowStatus.
zyxelMldSnoopingProxyVlanTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 5), )
if mibBuilder.loadTexts: zyxelMldSnoopingProxyVlanTable.setStatus('current')
zyxelMldSnoopingProxyVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 5, 1), ).setIndexNames((0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyVlanVid"))
if mibBuilder.loadTexts: zyxelMldSnoopingProxyVlanEntry.setStatus('current')
zyMldSnoopingProxyVlanVid = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 5, 1, 1), Integer32())
if mibBuilder.loadTexts: zyMldSnoopingProxyVlanVid.setStatus('current')
zyMldSnoopingProxyVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 5, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: zyMldSnoopingProxyVlanRowStatus.setStatus('current')
# Upstream VLAN table (.3.6): per-VLAN upstream port set and MLD querier
# timing. Range constraints come from the MIB definition; the units are
# presumably milliseconds — TODO confirm against the vendor MIB text.
zyxelMldSnoopingProxyUpstreamVlanTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 6), )
if mibBuilder.loadTexts: zyxelMldSnoopingProxyUpstreamVlanTable.setStatus('current')
zyxelMldSnoopingProxyUpstreamVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 6, 1), ).setIndexNames((0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyVlanVid"))
if mibBuilder.loadTexts: zyxelMldSnoopingProxyUpstreamVlanEntry.setStatus('current')
zyMldSnoopingProxyUpstreamVlanPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 6, 1, 1), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyUpstreamVlanPorts.setStatus('current')
zyMldSnoopingProxyUpstreamVlanQueryInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1000, 31744000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyUpstreamVlanQueryInterval.setStatus('current')
zyMldSnoopingProxyUpstreamVlanMaxResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1000, 25000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyUpstreamVlanMaxResponseTime.setStatus('current')
zyMldSnoopingProxyUpstreamVlanRobustness = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 6, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyUpstreamVlanRobustness.setStatus('current')
zyMldSnoopingProxyUpstreamVlanLastMemberQueryInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 6, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8387584))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyUpstreamVlanLastMemberQueryInterval.setStatus('current')
# Downstream VLAN table (.3.7): per-VLAN downstream port set and timing.
zyxelMldSnoopingProxyDownstreamVlanTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 7), )
if mibBuilder.loadTexts: zyxelMldSnoopingProxyDownstreamVlanTable.setStatus('current')
zyxelMldSnoopingProxyDownstreamVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 7, 1), ).setIndexNames((0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyVlanVid"))
if mibBuilder.loadTexts: zyxelMldSnoopingProxyDownstreamVlanEntry.setStatus('current')
zyMldSnoopingProxyDownstreamVlanPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 7, 1, 1), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyDownstreamVlanPorts.setStatus('current')
zyMldSnoopingProxyDownstreamVlanQueryInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 7, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1000, 31744000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyDownstreamVlanQueryInterval.setStatus('current')
zyMldSnoopingProxyDownstreamVlanMaxResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 7, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1000, 25000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyDownstreamVlanMaxResponseTime.setStatus('current')
# Downstream per-port leave behavior (.3.8), indexed by (VID, port index):
# leave mode is an enumeration immediate(0)/normal(1)/fast(2), with
# separate timeouts for normal and fast leave.
zyxelMldSnoopingProxyDownstreamVlanPortTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 8), )
if mibBuilder.loadTexts: zyxelMldSnoopingProxyDownstreamVlanPortTable.setStatus('current')
zyxelMldSnoopingProxyDownstreamVlanPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 8, 1), ).setIndexNames((0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyVlanVid"), (0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyDownstreamVlanPortIndex"))
if mibBuilder.loadTexts: zyxelMldSnoopingProxyDownstreamVlanPortEntry.setStatus('current')
zyMldSnoopingProxyDownstreamVlanPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 8, 1, 1), Integer32())
if mibBuilder.loadTexts: zyMldSnoopingProxyDownstreamVlanPortIndex.setStatus('current')
zyMldSnoopingProxyDownstreamVlanPortLeaveMode = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 8, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("immediate", 0), ("normal", 1), ("fast", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyDownstreamVlanPortLeaveMode.setStatus('current')
zyMldSnoopingProxyDownstreamVlanPortLeaveTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 8, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyDownstreamVlanPortLeaveTimeout.setStatus('current')
zyMldSnoopingProxyDownstreamVlanPortFastLeaveTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 3, 8, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyMldSnoopingProxyDownstreamVlanPortFastLeaveTimeout.setStatus('current')
# --- Membership status (branch .4) -------------------------------------
# Read-only table of learned multicast memberships, keyed by
# (VID, port, group-address-type, group-address). VID is constrained to
# the valid VLAN ID range 1..4094. The only accessible (non-index)
# column is the remaining group timeout.
zyxelMldSnoopingProxyMembershipTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 4, 1), )
if mibBuilder.loadTexts: zyxelMldSnoopingProxyMembershipTable.setStatus('current')
zyxelMldSnoopingProxyMembershipEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 4, 1, 1), ).setIndexNames((0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyMembershipVid"), (0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyMembershipPort"), (0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyMembershipGroupIpAddressType"), (0, "ZYXEL-MLD-SNOOPING-PROXY-MIB", "zyMldSnoopingProxyMembershipGroupIpAddress"))
if mibBuilder.loadTexts: zyxelMldSnoopingProxyMembershipEntry.setStatus('current')
zyMldSnoopingProxyMembershipVid = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: zyMldSnoopingProxyMembershipVid.setStatus('current')
zyMldSnoopingProxyMembershipPort = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 4, 1, 1, 2), Integer32())
if mibBuilder.loadTexts: zyMldSnoopingProxyMembershipPort.setStatus('current')
zyMldSnoopingProxyMembershipGroupIpAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 4, 1, 1, 3), InetAddressType())
if mibBuilder.loadTexts: zyMldSnoopingProxyMembershipGroupIpAddressType.setStatus('current')
zyMldSnoopingProxyMembershipGroupIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 4, 1, 1, 4), InetAddress())
if mibBuilder.loadTexts: zyMldSnoopingProxyMembershipGroupIpAddress.setStatus('current')
zyMldSnoopingProxyMembershipGroupTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 51, 4, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zyMldSnoopingProxyMembershipGroupTimeout.setStatus('current')
# Register every object defined above with the MIB builder under this
# module's name so other pysnmp modules can import them; PYSNMP_MODULE_ID
# marks the ModuleIdentity node. (Auto-generated by pysmi — do not edit.)
mibBuilder.exportSymbols("ZYXEL-MLD-SNOOPING-PROXY-MIB", zyMldSnoopingProxyStatisticsVlanV2ReportRx=zyMldSnoopingProxyStatisticsVlanV2ReportRx, zyMldSnoopingProxyMembershipGroupIpAddressType=zyMldSnoopingProxyMembershipGroupIpAddressType, zyMldSnoopingProxyFilteringProfileStartIpAddressType=zyMldSnoopingProxyFilteringProfileStartIpAddressType, zyxelMldSnoopingProxyFilteringProfileTable=zyxelMldSnoopingProxyFilteringProfileTable, zyMldSnoopingProxySysStatisticsV1QueryTx=zyMldSnoopingProxySysStatisticsV1QueryTx, zyxelMldSnoopingProxyStatistics=zyxelMldSnoopingProxyStatistics, zyMldSnoopingProxyStatisticsClearSystem=zyMldSnoopingProxyStatisticsClearSystem, zyMldSnoopingProxyMaxNumberOfVlans=zyMldSnoopingProxyMaxNumberOfVlans, zyMldSnoopingProxyStatisticsClear=zyMldSnoopingProxyStatisticsClear, zyMldSnoopingProxyFilteringPortMaxNumberOfGroups=zyMldSnoopingProxyFilteringPortMaxNumberOfGroups, zyxelMldSnoopingProxyUpstreamVlanEntry=zyxelMldSnoopingProxyUpstreamVlanEntry, zyxelMldSnoopingProxyMembershipTable=zyxelMldSnoopingProxyMembershipTable, zyMldSnoopingProxyFilteringProfileRowStatus=zyMldSnoopingProxyFilteringProfileRowStatus, zyMldSnoopingProxySysStatisticsV1DoneDrop=zyMldSnoopingProxySysStatisticsV1DoneDrop, zyxelMldSnoopingProxyMembershipEntry=zyxelMldSnoopingProxyMembershipEntry, zyMldSnoopingProxyStatisticsPortV1ReportRx=zyMldSnoopingProxyStatisticsPortV1ReportRx, zyMldSnoopingProxyStatisticsClearVlan=zyMldSnoopingProxyStatisticsClearVlan, zyMldSnoopingProxySysStatisticsV2ReportTx=zyMldSnoopingProxySysStatisticsV2ReportTx, zyMldSnoopingProxyStatisticsPortV2QueryDrop=zyMldSnoopingProxyStatisticsPortV2QueryDrop, zyxelMldSnoopingProxy=zyxelMldSnoopingProxy, zyxelMldSnoopingProxyMembershipStatus=zyxelMldSnoopingProxyMembershipStatus, zyMldSnoopingProxyMembershipVid=zyMldSnoopingProxyMembershipVid, zyMldSnoopingProxyVlanVid=zyMldSnoopingProxyVlanVid, zyMldSnoopingProxyStatisticsVlanV2QueryTx=zyMldSnoopingProxyStatisticsVlanV2QueryTx, 
zyMldSnoopingProxyMembershipPort=zyMldSnoopingProxyMembershipPort, zyMldSnoopingProxyDownstreamVlanPorts=zyMldSnoopingProxyDownstreamVlanPorts, zyMldSnoopingProxySysStatisticsV2QueryRx=zyMldSnoopingProxySysStatisticsV2QueryRx, zyMldSnoopingProxyDownstreamVlanPortLeaveTimeout=zyMldSnoopingProxyDownstreamVlanPortLeaveTimeout, zyMldSnoopingProxyDownstreamVlanPortFastLeaveTimeout=zyMldSnoopingProxyDownstreamVlanPortFastLeaveTimeout, zyMldSnoopingProxyStatisticsPortV1ReportDrop=zyMldSnoopingProxyStatisticsPortV1ReportDrop, zyMldSnoopingProxyStatisticsVlanV1DoneTx=zyMldSnoopingProxyStatisticsVlanV1DoneTx, zyxelMldSnoopingProxyDownstreamVlanPortTable=zyxelMldSnoopingProxyDownstreamVlanPortTable, zyxelMldSnoopingProxyVlanEntry=zyxelMldSnoopingProxyVlanEntry, zyMldSnoopingProxyStatisticsClearPort=zyMldSnoopingProxyStatisticsClearPort, zyMldSnoopingProxyStatisticsPortV1DoneDrop=zyMldSnoopingProxyStatisticsPortV1DoneDrop, zyMldSnoopingProxyMembershipGroupIpAddress=zyMldSnoopingProxyMembershipGroupIpAddress, zyxelMldSnoopingProxyFilteringPortTable=zyxelMldSnoopingProxyFilteringPortTable, zyMldSnoopingProxyStatisticsPortV2QueryRx=zyMldSnoopingProxyStatisticsPortV2QueryRx, zyMldSnoopingProxySysStatisticsV1QueryRx=zyMldSnoopingProxySysStatisticsV1QueryRx, zyxelMldSnoopingProxyFilteringSetup=zyxelMldSnoopingProxyFilteringSetup, zyMldSnoopingProxyStatisticsPortV2ReportRx=zyMldSnoopingProxyStatisticsPortV2ReportRx, zyMldSnoopingProxyStatisticsVlanV1ReportDrop=zyMldSnoopingProxyStatisticsVlanV1ReportDrop, PYSNMP_MODULE_ID=zyxelMldSnoopingProxy, zyxelMldSnoopingProxyDownstreamVlanPortEntry=zyxelMldSnoopingProxyDownstreamVlanPortEntry, zyMldSnoopingProxyDownstreamVlanPortLeaveMode=zyMldSnoopingProxyDownstreamVlanPortLeaveMode, zyMldSnoopingProxySysStatisticsV1QueryDrop=zyMldSnoopingProxySysStatisticsV1QueryDrop, zyxelMldSnoopingProxyDownstreamVlanTable=zyxelMldSnoopingProxyDownstreamVlanTable, zyMldSnoopingProxyStatisticsVlanV2QueryDrop=zyMldSnoopingProxyStatisticsVlanV2QueryDrop, 
zyMldSnoopingProxySysStatisticsV2QueryTx=zyMldSnoopingProxySysStatisticsV2QueryTx, zyMldSnoopingProxyStatisticsVlanV1ReportRx=zyMldSnoopingProxyStatisticsVlanV1ReportRx, zyMldSnoopingProxyDownstreamVlanQueryInterval=zyMldSnoopingProxyDownstreamVlanQueryInterval, zyMldSnoopingProxyFilteringProfileEndIpAddressType=zyMldSnoopingProxyFilteringProfileEndIpAddressType, zyMldSnoopingProxyFilteringMaxNumberOfProfiles=zyMldSnoopingProxyFilteringMaxNumberOfProfiles, zyMldSnoopingProxyFilteringProfileEndIpAddress=zyMldSnoopingProxyFilteringProfileEndIpAddress, zyMldSnoopingProxyStatisticsPortV1DoneRx=zyMldSnoopingProxyStatisticsPortV1DoneRx, zyMldSnoopingProxyStatisticsPortV1ReportTx=zyMldSnoopingProxyStatisticsPortV1ReportTx, zyxelMldSnoopingProxyStatisticsVlanEntry=zyxelMldSnoopingProxyStatisticsVlanEntry, zyMldSnoopingProxyFilteringProfileStartIpAddress=zyMldSnoopingProxyFilteringProfileStartIpAddress, zyMldSnoopingProxyStatisticsVlanV2ReportDrop=zyMldSnoopingProxyStatisticsVlanV2ReportDrop, zyxelMldSnoopingProxyUpstreamVlanTable=zyxelMldSnoopingProxyUpstreamVlanTable, zyMldSnoopingProxySysStatisticsV2ReportRx=zyMldSnoopingProxySysStatisticsV2ReportRx, zyMldSnoopingProxyFilteringPortGroupLimitState=zyMldSnoopingProxyFilteringPortGroupLimitState, zyMldSnoopingProxyStatisticsVlanV1QueryDrop=zyMldSnoopingProxyStatisticsVlanV1QueryDrop, zyMldSnoopingProxyMembershipGroupTimeout=zyMldSnoopingProxyMembershipGroupTimeout, zyMldSnoopingProxySysStatisticsV1ReportDrop=zyMldSnoopingProxySysStatisticsV1ReportDrop, zyxelMldSnoopingProxySetup=zyxelMldSnoopingProxySetup, zyMldSnoopingProxyUpstreamVlanPorts=zyMldSnoopingProxyUpstreamVlanPorts, zyMldSnoopingProxyStatisticsPortV1QueryDrop=zyMldSnoopingProxyStatisticsPortV1QueryDrop, zyMldSnoopingProxyUpstreamVlanLastMemberQueryInterval=zyMldSnoopingProxyUpstreamVlanLastMemberQueryInterval, zyxelMldSnoopingProxyVlanTable=zyxelMldSnoopingProxyVlanTable, zyMldSnoopingProxyFilteringState=zyMldSnoopingProxyFilteringState, 
zyMldSnoopingProxyStatisticsVlanV2ReportTx=zyMldSnoopingProxyStatisticsVlanV2ReportTx, zyMldSnoopingProxyStatisticsVlanV1ReportTx=zyMldSnoopingProxyStatisticsVlanV1ReportTx, zyMldSnoopingProxyStatisticsVlanV1QueryTx=zyMldSnoopingProxyStatisticsVlanV1QueryTx, zyMldSnoopingProxyVlanRowStatus=zyMldSnoopingProxyVlanRowStatus, zyMldSnoopingProxyStatisticsPortV1QueryRx=zyMldSnoopingProxyStatisticsPortV1QueryRx, zyMldSnoopingProxyDownstreamVlanPortIndex=zyMldSnoopingProxyDownstreamVlanPortIndex, zyMldSnoopingProxySysStatisticsV2ReportDrop=zyMldSnoopingProxySysStatisticsV2ReportDrop, zyMldSnoopingProxyStatisticsVlanV1DoneDrop=zyMldSnoopingProxyStatisticsVlanV1DoneDrop, zyMldSnoopingProxyStatisticsPortV2QueryTx=zyMldSnoopingProxyStatisticsPortV2QueryTx, zyMldSnoopingProxyState=zyMldSnoopingProxyState, zyxelMldSnoopingProxyStatisticsVlanTable=zyxelMldSnoopingProxyStatisticsVlanTable, zyMldSnoopingProxyStatisticsPortV2ReportDrop=zyMldSnoopingProxyStatisticsPortV2ReportDrop, zyMldSnoopingProxySysStatisticsV1DoneTx=zyMldSnoopingProxySysStatisticsV1DoneTx, zyMldSnoopingProxySysStatisticsV1ReportRx=zyMldSnoopingProxySysStatisticsV1ReportRx, zyxelMldSnoopingProxyFilteringPortEntry=zyxelMldSnoopingProxyFilteringPortEntry, zyMldSnoopingProxyStatisticsVlanV1QueryRx=zyMldSnoopingProxyStatisticsVlanV1QueryRx, zyMldSnoopingProxyDownstreamVlanMaxResponseTime=zyMldSnoopingProxyDownstreamVlanMaxResponseTime, zyxelMldSnoopingProxyDownstreamVlanEntry=zyxelMldSnoopingProxyDownstreamVlanEntry, zyMldSnoopingProxyStatisticsVlanV2QueryRx=zyMldSnoopingProxyStatisticsVlanV2QueryRx, zyMldSnoopingProxy8021pPriority=zyMldSnoopingProxy8021pPriority, zyMldSnoopingProxyStatisticsVlanV1DoneRx=zyMldSnoopingProxyStatisticsVlanV1DoneRx, zyMldSnoopingProxyUpstreamVlanRobustness=zyMldSnoopingProxyUpstreamVlanRobustness, zyMldSnoopingProxySysStatisticsV2QueryDrop=zyMldSnoopingProxySysStatisticsV2QueryDrop, zyMldSnoopingProxySysStatisticsV1DoneRx=zyMldSnoopingProxySysStatisticsV1DoneRx, 
zyMldSnoopingProxyStatisticsPortV1QueryTx=zyMldSnoopingProxyStatisticsPortV1QueryTx, zyMldSnoopingProxyStatisticsPortV1DoneTx=zyMldSnoopingProxyStatisticsPortV1DoneTx, zyMldSnoopingProxyStatisticsPortV2ReportTx=zyMldSnoopingProxyStatisticsPortV2ReportTx, zyMldSnoopingProxyUpstreamVlanQueryInterval=zyMldSnoopingProxyUpstreamVlanQueryInterval, zyxelMldSnoopingProxyStatisticsPortTable=zyxelMldSnoopingProxyStatisticsPortTable, zyMldSnoopingProxySysStatisticsV1ReportTx=zyMldSnoopingProxySysStatisticsV1ReportTx, zyMldSnoopingProxyUpstreamVlanMaxResponseTime=zyMldSnoopingProxyUpstreamVlanMaxResponseTime, zyxelMldSnoopingProxyFilteringProfileEntry=zyxelMldSnoopingProxyFilteringProfileEntry, zyMldSnoopingProxyFilteringPortProfile=zyMldSnoopingProxyFilteringPortProfile, zyMldSnoopingProxyFilteringProfileName=zyMldSnoopingProxyFilteringProfileName, zyxelMldSnoopingProxyStatisticsPortEntry=zyxelMldSnoopingProxyStatisticsPortEntry)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
1168,
56,
55,
3698,
12,
5805,
35,
12,
50,
15285,
3185,
2751,
12,
31190,
34278,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,... | 2.600161 | 13,698 |
#!/usr/bin/env python
import sys
import re
import numpy as np
start=0
filename ='tempController.log'
#print (len(sys.argv))
if(len(sys.argv)==2):
filename=sys.argv[1]
# filename -s start_line
for i in range(1,len(sys.argv)):
if(sys.argv[i]=='-s'):
start=sys.argv[i+1]
#print start
if(filename[0] != '/'):
f = open('/home/coder/coder-dist/coder-base/data/'+filename)
else:
f = open(filename)
#f = open('log.log')
text = f.read()
f.close()
text = text.replace('=',' ')
lines1 = text.split('\n')
#for line in lines1:
# print line
#print
#print(re.search('currentTemp=',lines1))
date=np.array([])
temp=np.array([])
pwm=np.array([])
dir=np.array([])
bme280=np.array([])
lux=np.array([])
cons=np.array([])
ds18b20=np.array([])
tmp=np.array([])
startcol=False
datain=False
#print len(lines1)
#for line in lines1:
#print 'start='+str(start)
if int(start) > int(len(lines1)):
start=len(lines1)
start=int(start)
#print 'start='+str(start)
#print 'len='+str(len(lines1))
for i in range(start*9,len(lines1)):
line=lines1[i];
tmp=np.append(tmp,line[:])
if line.find('--------') >= 0:
startcol=True
datein=False
if line.find('20') >= 0 and startcol==True and datein==False:
date=np.append(date,line[:])
datein=True
if line.find('currentTemp') >= 0:
temp=np.append(temp,line[:])
if line.find('PWM') >= 0:
pwm=np.append(pwm,line[:])
if line.find('hot') >= 0:
dir=np.append(dir,line[:])
if line.find('cool') >= 0:
dir=np.append(dir,line[:])
if line.find('BME280') >= 0:
bme280=np.append(bme280,line[:])
if line.find('currentLux') >= 0:
lux=np.append(lux,line[:])
if line.find('DS18B20') >= 0:
ds18b20=np.append(ds18b20,line[:])
if line.find('Tcondensation') >= 0:
cons=np.append(cons,line[:])
startcol=False
#for i in range(start/9,len(temp)):
# print temp[i]
for i in range(start,len(temp)-1):
# print date[i+1]+' '+temp[i]+' '+pwm[i]+' '+bme280[i]+' '+lux[i]+' '+cons[i]+' '+ds18b20[i]
#print bme2801[i].split(' ')
d = bme280[i].split(' ')
p = pwm[i].split(' ')
dr=0.0
if dir[i].split(' ')[2]=='hot':
dr = 1.0
else:
dr = -1.0
#print dir1[i].split(' ')[2]+' '+'dr '+str(dr)
g = ds18b20[i].split(' ')
t = cons[i].split(' ')
# date temp hum press pwm dir constration AH
print date[i+1]+','+d[5]+','+d[3]+','+d[7]+','+ str(dr*float(p[1])/10.0)+','+g[1]+','+t[5]+','+t[3]
print 'EOF'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
11748,
302,
198,
11748,
299,
32152,
355,
45941,
198,
198,
9688,
28,
15,
198,
198,
34345,
796,
6,
29510,
22130,
13,
6404,
6,
198,
2,
4798,
357,
11925,
7,
17597... | 2.096187 | 1,154 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
from base64 import b64encode, b64decode
from pbincli.utils import PBinCLIError
import zlib
# try import AES cipher and check if it has GCM mode (prevent usage of pycrypto)
try:
from Crypto.Cipher import AES
if not hasattr(AES, 'MODE_GCM'):
try:
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
except ImportError:
PBinCLIError("AES GCM mode is not found in imported crypto module.\n" +
"That can happen if you have installed pycrypto.\n\n" +
"We tried to import pycryptodomex but it is not available.\n" +
"Please install it via pip, if you still need pycrypto, by running:\n" +
"\tpip install pycryptodomex\n" +
"... otherwise use separate python environment or uninstall pycrypto:\n" +
"\tpip uninstall pycrypto")
else:
from Crypto.Random import get_random_bytes
except ImportError:
PBinCLIError("Unable import pycryptodome")
CIPHER_ITERATION_COUNT = 100000
CIPHER_SALT_BYTES = 8
CIPHER_BLOCK_BITS = 256
CIPHER_TAG_BITS = 128
| [
6738,
2779,
2414,
1330,
275,
2414,
268,
8189,
11,
275,
2414,
12501,
1098,
198,
6738,
279,
8800,
44506,
13,
26791,
1330,
30524,
259,
5097,
40,
12331,
198,
11748,
1976,
8019,
198,
198,
2,
1949,
1330,
34329,
38012,
290,
2198,
611,
340,
4... | 2.371608 | 479 |
# from torch2trt.torch2trt import *
# import torch
# # @tensorrt_converter('torch.sub')
# # def convert_sub(ctx):
# # input_a = ctx.method_args[0]
# # input_b = ctx.method_args[1]
# # output = ctx.method_return
# # input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
# # input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
# # layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.SUB)
# # output._trt = layer.get_output(0)
# # import tensorrt as trt
# # from torch2trt import tensorrt_converter
# @tensorrt_converter('torch.nn.ReLU.forward')
# def convert_ReLU(ctx):
# input = ctx.method_args[1]
# output = ctx.method_return
# layer = ctx.network.add_activation(input=input._trt, type=trt.ActivationType.RELU)
# output._trt = layer.get_output(0)
# @tensorrt_converter('torch.zeros')
# def convert_zeros(ctx):
# input = ctx.method_args[0]
# print(input)
# output = ctx.method_return
# val_tensor = torch.ones(tuple(input), dtype=torch.float32).cpu().numpy()
# layer = ctx.network.add_constant(tuple(input), val_tensor)
# output._trt = layer.get_output(0)
# class Zeros(torch.nn.Module):
# def __init__(self):
# super(Zeros, self).__init__()
# def forward(self, shape):
# return torch.zeros(shape)
# model = Zeros()
# shape = [1, 2, 3, 4]
# print(model(shape))
# # from torch2trt import torch2trt
# # model_trt = torch2trt(model, [torch.tensor(shape, dtype=torch.int32)])
# # y = torch.tensor([2], dtype=torch.int32).cuda()
# # print(model_trt(y))
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.tensor')
model = TorchTensor()
print(model(1))
import torch2trt
x = torch.ones((2, 3)).cuda()
model_trt = torch2trt.torch2trt(model, [x])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 3)])
test_tensor_creation() | [
2,
422,
28034,
17,
2213,
83,
13,
13165,
354,
17,
2213,
83,
1330,
1635,
198,
2,
1330,
28034,
198,
198,
2,
1303,
2488,
83,
22854,
17034,
62,
1102,
332,
353,
10786,
13165,
354,
13,
7266,
11537,
198,
2,
1303,
825,
10385,
62,
7266,
7,
... | 2.238148 | 907 |
import sys
import pkg_resources
sys.path.insert(0, 'anaconda_lib')
sys.path.insert(1, 'anaconda_server')
pkg_resources.declare_namespace(__name__)
| [
11748,
25064,
198,
11748,
279,
10025,
62,
37540,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
705,
272,
330,
13533,
62,
8019,
11537,
198,
17597,
13,
6978,
13,
28463,
7,
16,
11,
705,
272,
330,
13533,
62,
15388,
11537,
198,
35339,
62,
... | 2.625 | 56 |
samples = [
{
"input": {
"array": [
[1, 2, 3, 4],
[12, 13, 14, 5],
[11, 16, 15, 6],
[10, 9, 8, 7],
],
},
"output": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
},
{
"input": {
"array": [
[1, 2, 3],
[12, 13, 4],
[11, 14, 5],
[10, 15, 6],
[9, 8, 7],
],
},
"output": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
},
]
| [
82,
12629,
796,
685,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
15414,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
18747,
1298,
685,
198,
220,
220,
220,
220,
220,
220,
... | 1.378505 | 428 |
import sys
import os
import re
import time
import json
import itertools
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
340,
861,
10141,
628
] | 3.65 | 20 |
from adventofcode2021.utils.abstract import FileReaderSolution
| [
6738,
19980,
1659,
8189,
1238,
2481,
13,
26791,
13,
397,
8709,
1330,
9220,
33634,
46344,
628,
628
] | 3.882353 | 17 |
MOCK_USERS = [
{"email": 'test@test.com', "password": 'test'}
]
MOCK_TABLES = [{"_id": "1", "number": "1",
"owner": "test@test.com", "url": "/newrequest"}]
| [
44,
11290,
62,
2937,
4877,
796,
685,
198,
220,
220,
220,
220,
19779,
12888,
1298,
705,
9288,
31,
9288,
13,
785,
3256,
366,
28712,
1298,
705,
9288,
6,
92,
198,
60,
198,
44,
11290,
62,
5603,
9148,
1546,
796,
685,
4895,
62,
312,
1298... | 2.022727 | 88 |
# noqa
from .provider import (
get_cached_combined,
get_cached_etherscan_api,
get_cached_local_interfaces
)
from .storage import (
ABIKey, FuncStorage
)
__all__ = [
'get_cached_combined',
'get_cached_etherscan_api',
'get_cached_local_interfaces',
'ABIKey', 'FuncStorage'
]
| [
2,
645,
20402,
198,
6738,
764,
15234,
1304,
1330,
357,
198,
220,
220,
220,
651,
62,
66,
2317,
62,
24011,
1389,
11,
198,
220,
220,
220,
651,
62,
66,
2317,
62,
6750,
35836,
62,
15042,
11,
198,
220,
220,
220,
651,
62,
66,
2317,
62,... | 2.233577 | 137 |
'''
--------------------------------------------------
File: app.py
--------------------------------------------------
Author: Deloitte Australia 2021
Description: Defines the application that will provide the API for the recommendation engines
Endpoints:
#TODO
Run with
$ uvicorn src.app:app --reload --host 0.0.0.0 --port 5000
Or build and run with
$ export DOCKER_BUILDKIT=0
$ docker image build -t recommendation-engine-app .
$ docker run -p 5000:5000 --name re-app -d recommendation-engine-app
--------------------------------------------------
Edit History:
# | NAME | DATE | DESC
0 | Grant Holtes | 11/2/21 | Initial Creation
--------------------------------------------------
'''
#FastAPI imports
from fastapi import FastAPI, Response, status, Form
from fastapi.responses import HTMLResponse
import traceback
#model and data pipeline imports
import numpy as np
import json
import os
import csv
#Reqest and Response Schemas
from src.schemas import *
#Config HTTP error codes
bad_input_code = 400
out_of_order_code = 400
general_app_error_code = 500
#Initialise key services
app = FastAPI()
@app.get('/')
@app.get('/health/', status_code = 204)
#Core end-points
@app.post('/product/', status_code=200)
@app.post('/user/', status_code=200)
@app.post('/add_review/', status_code=200)
@app.post('/add_transaction/', status_code=200)
@app.post('/add_view/', status_code=200)
| [
7061,
6,
198,
47232,
438,
198,
8979,
25,
598,
13,
9078,
198,
47232,
438,
198,
13838,
25,
4216,
78,
2654,
4505,
33448,
198,
198,
11828,
25,
2896,
1127,
262,
3586,
326,
481,
2148,
262,
7824,
329,
262,
15602,
11874,
198,
198,
12915,
13... | 3.160356 | 449 |
from flask import request
from flask_restful import Resource
from flask_jwt_extended import jwt_required, get_jwt_identity
from app.api.stammdaten.models import SentosaSetting, SentosaUntersuchung, SentosaSettingSchema, SentosaUntersuchungSchema
| [
6738,
42903,
1330,
2581,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
198,
6738,
42903,
62,
73,
46569,
62,
2302,
1631,
1330,
474,
46569,
62,
35827,
11,
651,
62,
73,
46569,
62,
738,
414,
198,
6738,
598,
13,
15042,
13,
301,
6475,
196... | 3.121951 | 82 |
import unittest
from src.Zad1.Password import *
| [
11748,
555,
715,
395,
198,
6738,
12351,
13,
57,
324,
16,
13,
35215,
1330,
1635,
628
] | 3.0625 | 16 |
import boto3, json, os
'''def put_to_s3(bucket_name, bucket_file_name, temp_creds):
# Push temporary credentials to S3 bucket
s3 = boto3.resource('s3')
result = s3.Object(bucket_name, bucket_file_name).put(Body=str(temp_creds), ServerSideEncryption='AES256')
# s3.Object("st-security-audit", "temp-credstore/credentials.json").put(Body=str(d), ServerSideEncryption='AES256') - Auto replaces existing file.
return result'''
| [
11748,
275,
2069,
18,
11,
33918,
11,
28686,
198,
198,
7061,
6,
4299,
1234,
62,
1462,
62,
82,
18,
7,
27041,
316,
62,
3672,
11,
19236,
62,
7753,
62,
3672,
11,
20218,
62,
66,
445,
82,
2599,
198,
220,
220,
220,
1303,
23691,
8584,
18... | 2.611765 | 170 |
#!/usr/bin/env python3
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--project", required=True,
help="Project that flights service is deployed in")
args = parser.parse_args()
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1', credentials=credentials,
discoveryServiceUrl='https://storage.googleapis.com/cloud-ml/discovery/ml_v1_discovery.json')
request_data = {'instances':
[
{
'dep_delay': 16.0,
'taxiout': 13.0,
'distance': 160.0,
'avg_dep_delay': 13.34,
'avg_arr_delay': 67.0,
'carrier': 'AS',
'dep_lat': 61.17,
'dep_lon': -150.00,
'arr_lat': 60.49,
'arr_lon': -145.48,
'origin': 'ANC',
'dest': 'CDV'
}
]
}
PROJECT = args.project
parent = 'projects/%s/models/%s/versions/%s' % (PROJECT, 'flights', 'tf2')
response = api.projects().predict(body=request_data, name=parent).execute()
print("response={0}".format(response))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
201,
198,
6738,
23645,
499,
291,
75,
1153,
1330,
9412,
201,
198,
6738,
267,
18439,
17,
16366,
13,
16366,
1330,
3012,
34,
445,
14817,
201,
198,
11748,
1822,
29572,
201,
198,
... | 2.208955 | 536 |
#!/usr/bin/env python
import typer
from lib import aoc
from typing import List
SAMPLE = [
'abc\n\na\nb\nc\n\nab\nac\n\na\na\na\na\n\nb',
"abc\n\na\nb\nc\n\nab\nac\n\na\na\na\na\n\nb",
]
if __name__ == '__main__':
typer.run(Day06().run)
# vim:ts=2:sw=2:expandtab
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1259,
525,
198,
6738,
9195,
1330,
257,
420,
198,
6738,
19720,
1330,
7343,
198,
198,
49302,
16437,
796,
685,
198,
220,
705,
39305,
59,
77,
59,
2616,
59,
46803,
59,
10782,
... | 1.902778 | 144 |
'''
Copyright (c) 2013 by JustAMan at GitHub
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This script performs cleanup of Windows Installer cache trying to be as safe as possible:
it removes only *.msi/*.msp files that are not references as installed on the system (most
likely some leftover junk after unsuccessful installations).
If you break your Windows Installer cache here's a link to MS blog describing the way to fix it:
http://blogs.msdn.com/heaths/archive/2006/11/30/rebuilding-the-installer-cache.aspx
'''
from msi_helpers import getAllPatches, getAllProducts
from win32elevate import elevateAdminRights
from common_helpers import MB
import os
import glob
import errno
def getCachedMsiFiles(ext):
'''
Finds all cached MSI files at %SystemRoot%\Installer\*.<ext>
ext can be 'msi' (for installation) or 'msp' (for patches)
'''
return [fn.lower() for fn in glob.glob(os.path.join(os.getenv('SystemRoot'), 'Installer',
'*.%s' % ext))]
def unsquishGuid(guid):
'''
Unsquishes a GUID (squished GUIDs are used in %SystemRoot%\Installer\$PatchCache$\*
'''
squeezedGuid = ''.join(c2 + c1 for (c1, c2) in zip(*[iter(guid)]*2))
return '{%s}' % '-'.join([_rotateString(squeezedGuid[:8]),
_rotateString(squeezedGuid[8:12]),
_rotateString(squeezedGuid[12:16]),
squeezedGuid[16:20], squeezedGuid[20:]])
if __name__ == '__main__':
main()
| [
7061,
6,
198,
15269,
357,
66,
8,
2211,
416,
2329,
2390,
272,
379,
21722,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
198,
5661,
3788,
290,
3917,
10314,
3696,
357,
1169,
366... | 2.860731 | 876 |
#!/usr/bin/env python
import sys, math
from numpy import *
"""
As datatrans3a.py, but splitting the whole file at once,
store the numbers in a one-dimensional NumPy and then
reshaping the array appropriately.
"""
try:
infilename = sys.argv[1]; outfilename = sys.argv[2]
except:
print "Usage:",sys.argv[0], "infile outfile"; sys.exit(1)
# read (x,y) data from file into a NumPy array data:
f = open(infilename, 'r')
data = array(map(float, f.read().split()))
# (map is normally faster than [float(x) for x in f.read().split()])
data.shape = (len(data)/2,2)
# transform y values:
x = data[:,0]
y = data[:,1]
y = myfunc(y)
f = open(outfilename, 'w')
import scitools.filetable
scitools.filetable.write_columns(f, x, y)
f.close()
# end
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
11,
10688,
198,
6738,
299,
32152,
1330,
1635,
198,
37811,
198,
1722,
4818,
265,
26084,
18,
64,
13,
9078,
11,
475,
26021,
262,
2187,
2393,
379,
1752,
11,
198,
8095,
262,
... | 2.591696 | 289 |
# coding: utf-8
from __future__ import absolute_import
from .account_service_api import AccountServiceApi
from .application_user_service_api import ApplicationUserServiceApi
from .card_processing_service_api import CardProcessingServiceApi
from .charge_attempt_service_api import ChargeAttemptServiceApi
from .charge_flow_level_payment_link_service_api import ChargeFlowLevelPaymentLinkServiceApi
from .charge_flow_level_service_api import ChargeFlowLevelServiceApi
from .charge_flow_service_api import ChargeFlowServiceApi
from .condition_type_service_api import ConditionTypeServiceApi
from .country_service_api import CountryServiceApi
from .country_state_service_api import CountryStateServiceApi
from .currency_service_api import CurrencyServiceApi
from .customer_address_service_api import CustomerAddressServiceApi
from .customer_comment_service_api import CustomerCommentServiceApi
from .customer_service_api import CustomerServiceApi
from .delivery_indication_service_api import DeliveryIndicationServiceApi
from .document_template_service_api import DocumentTemplateServiceApi
from .document_template_type_service_api import DocumentTemplateTypeServiceApi
from .human_user_service_api import HumanUserServiceApi
from .label_description_group_service_api import LabelDescriptionGroupServiceApi
from .label_description_service_api import LabelDescriptionServiceApi
from .language_service_api import LanguageServiceApi
from .legal_organization_form_service_api import LegalOrganizationFormServiceApi
from .manual_task_service_api import ManualTaskServiceApi
from .payment_connector_configuration_service_api import PaymentConnectorConfigurationServiceApi
from .payment_connector_service_api import PaymentConnectorServiceApi
from .payment_link_service_api import PaymentLinkServiceApi
from .payment_method_brand_service_api import PaymentMethodBrandServiceApi
from .payment_method_configuration_service_api import PaymentMethodConfigurationServiceApi
from .payment_method_service_api import PaymentMethodServiceApi
from .payment_processor_configuration_service_api import PaymentProcessorConfigurationServiceApi
from .payment_processor_service_api import PaymentProcessorServiceApi
from .payment_terminal_service_api import PaymentTerminalServiceApi
from .payment_terminal_till_service_api import PaymentTerminalTillServiceApi
from .permission_service_api import PermissionServiceApi
from .refund_comment_service_api import RefundCommentServiceApi
from .refund_service_api import RefundServiceApi
from .shopify_transaction_service_api import ShopifyTransactionServiceApi
from .space_service_api import SpaceServiceApi
from .static_value_service_api import StaticValueServiceApi
from .subscriber_service_api import SubscriberServiceApi
from .subscription_affiliate_service_api import SubscriptionAffiliateServiceApi
from .subscription_charge_service_api import SubscriptionChargeServiceApi
from .subscription_ledger_entry_service_api import SubscriptionLedgerEntryServiceApi
from .subscription_metric_service_api import SubscriptionMetricServiceApi
from .subscription_metric_usage_service_api import SubscriptionMetricUsageServiceApi
from .subscription_period_bill_service_api import SubscriptionPeriodBillServiceApi
from .subscription_product_component_group_service_api import SubscriptionProductComponentGroupServiceApi
from .subscription_product_component_service_api import SubscriptionProductComponentServiceApi
from .subscription_product_fee_tier_service_api import SubscriptionProductFeeTierServiceApi
from .subscription_product_metered_fee_service_api import SubscriptionProductMeteredFeeServiceApi
from .subscription_product_period_fee_service_api import SubscriptionProductPeriodFeeServiceApi
from .subscription_product_retirement_service_api import SubscriptionProductRetirementServiceApi
from .subscription_product_service_api import SubscriptionProductServiceApi
from .subscription_product_setup_fee_service_api import SubscriptionProductSetupFeeServiceApi
from .subscription_product_version_retirement_service_api import SubscriptionProductVersionRetirementServiceApi
from .subscription_product_version_service_api import SubscriptionProductVersionServiceApi
from .subscription_service_api import SubscriptionServiceApi
from .subscription_suspension_service_api import SubscriptionSuspensionServiceApi
from .subscription_version_service_api import SubscriptionVersionServiceApi
from .token_service_api import TokenServiceApi
from .token_version_service_api import TokenVersionServiceApi
from .transaction_comment_service_api import TransactionCommentServiceApi
from .transaction_completion_service_api import TransactionCompletionServiceApi
from .transaction_iframe_service_api import TransactionIframeServiceApi
from .transaction_invoice_comment_service_api import TransactionInvoiceCommentServiceApi
from .transaction_invoice_service_api import TransactionInvoiceServiceApi
from .transaction_lightbox_service_api import TransactionLightboxServiceApi
from .transaction_line_item_version_service_api import TransactionLineItemVersionServiceApi
from .transaction_mobile_sdk_service_api import TransactionMobileSdkServiceApi
from .transaction_payment_page_service_api import TransactionPaymentPageServiceApi
from .transaction_service_api import TransactionServiceApi
from .transaction_terminal_service_api import TransactionTerminalServiceApi
from .transaction_void_service_api import TransactionVoidServiceApi
from .user_account_role_service_api import UserAccountRoleServiceApi
from .user_space_role_service_api import UserSpaceRoleServiceApi
from .webhook_listener_service_api import WebhookListenerServiceApi
from .webhook_url_service_api import WebhookUrlServiceApi
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
764,
23317,
62,
15271,
62,
15042,
1330,
10781,
16177,
32,
14415,
198,
6738,
764,
31438,
62,
7220,
62,
15271,
62,
15042,
1330,
... | 3.906658 | 1,457 |
# These need to be at the top to allow for running on cluster
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
# Other imports
import numpy as np
import json
import math
from h_buildModels import build_par_inject_model, build_incv3_feat, build_resnet50_feat, build_brownlee_model, \
build_attention_model, build_basic_model, ExternalAttentionRNNWrapper, build_webshopincluded_model, \
build_category_brownlee_model, build_category_merge_model, build_category_parinject_model, \
build_img_cat_brownlee_model, build_img_cat_merge_model, build_img_cat_parinject_model
from h_customGenerator import CustomGenerator, BLEU_validation, CategoricGenerator, AttributeGenerator
import pickle
from tensorflow.keras.utils import plot_model
import tensorflow as tf
from h_utils import get_desc, compute_corpusbleu, compute_ROUGE, masked_categorical_crossentropy
import random
from nltk.translate.bleu_score import corpus_bleu
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
import tensorflow.keras.backend as K
from tqdm import tqdm
import gc # garbage collection due to keras memory leak
from tensorflow.keras.models import load_model, save_model
import time
import trace
# Type testing function
| [
2,
2312,
761,
284,
307,
379,
262,
1353,
284,
1249,
329,
2491,
319,
13946,
201,
198,
11748,
28686,
201,
198,
11748,
25064,
201,
198,
201,
198,
66,
16993,
796,
28686,
13,
1136,
66,
16993,
3419,
201,
198,
17597,
13,
6978,
13,
33295,
7,... | 2.952183 | 481 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
class OSType(Enum):
"""
Action descriptor type
"""
REFERENCE = b'obj '
DESCRIPTOR = b'Objc'
LIST = b'VlLs'
DOUBLE = b'doub'
UNIT_FLOAT = b'UntF'
UNIT_FLOATS = b'UnFl'
STRING = b'TEXT'
ENUMERATED = b'enum'
INTEGER = b'long'
BOOLEAN = b'bool'
GLOBAL_OBJECT = b'GlbO'
CLASS1 = b'type'
CLASS2 = b'GlbC'
ALIAS = b'alis'
RAW_DATA = b'tdta'
OBJECT_ARRAY = b'ObAr'
class ReferenceOSType(Enum):
"""
OS Type keys for Reference Structure
"""
PROPERTY = b'prop'
CLASS = b'Clss'
ENUMERATED_REFERENCE = b'Enmr'
OFFSET = b'rele'
IDENTIFIER = b'Idnt'
INDEX = b'indx'
NAME = b'name'
class EffectOSType(Enum):
"""
OS Type keys for Layer Effects
"""
COMMON_STATE = b'cmnS'
DROP_SHADOW = b'dsdw'
INNER_SHADOW = b'isdw'
OUTER_GLOW = b'oglw'
INNER_GLOW = b'iglw'
BEVEL = b'bevl'
SOLID_FILL = b'sofi'
class UnitFloatType(Enum):
"""
Units the value is in (used in Unit float structure)
"""
ANGLE = b'#Ang' # base degrees
DENSITY = b'#Rsl' # base per inch
DISTANCE = b'#Rlt' # base 72ppi
NONE = b'#Nne' # coerced
PERCENT = b'#Prc' # unit value
PIXELS = b'#Pxl' # tagged unit value
POINTS = b'#Pnt' # points
MILLIMETERS = b'#Mlm' # millimeters
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
628,
628,
628,
628,
628,
628,
198,
4871,
440,
2257,
2981,
7,
4834,
388,
2599,
198,
220... | 2.027221 | 698 |
# -*- coding: utf-8 -*-
"""
SolarDB configuration.
"""
import argparse
import datetime
import logging
import pickle as pk
from enum import Enum
from io import StringIO
from typing import Any, Dict, Union, TextIO, TypeVar, List, Optional
from solardb.common.cache import Cache
def split_with_keywords(to_split: [str], keywords: [str]) -> [[str]]:
"""
Split given list of strings into sub-lists which
contain only one keyword each.
:param to_split: Vector of strings to split.
:param keywords: Keywords to split the list by.
:return: Returns list of list of lists split by keywords.
"""
result = []
used_keywords = []
last_string_idx = 0
for idx, val in enumerate(to_split):
if val not in keywords or val in used_keywords:
continue
if last_string_idx != idx:
result.append(to_split[last_string_idx:idx])
last_string_idx = idx
used_keywords.append(val)
result.append(to_split[last_string_idx:])
return result
class Config(Cache):
"""
Container for application configuration. Default options
are automatically configured.
"""
DEFAULT_OPTIONS = {}
""" Default option values. """
Parser = argparse.ArgumentParser
""" Shortcut for the argument parser. """
@property
def start_time(self):
""" Get time of starting the application. """
return self._start_time
@property
def runtime_arguments(self):
""" Get command line arguments for the current runtime. """
return self._runtime_arguments
def init_options(self):
"""
Initialize all options to default values.
"""
self.cache = Config.DEFAULT_OPTIONS.copy()
# Include arguments from the main parser.
sub_commands = {"main": self.parser}
sub_commands.update(self.sub_commands.choices)
# Go through all commands and initialize config options.
for name, sub in sub_commands.items():
for action in sub._actions:
# Skip options with no default.
if action.default is None or \
action.default == argparse.SUPPRESS:
continue
self.set_path(action.dest, action.default, create=True)
def add_subcommand(self, name: str) -> argparse.ArgumentParser:
"""
Add new sub-command for argument parsing.
:param name: Name of the sub-command.
:return: Returns parser for the sub-command.
"""
return self.sub_commands.add_parser(name)
def get_arg_parser(self) -> argparse.ArgumentParser:
"""
Access the main argument parser, which can be used
to add more arguments.
:return: Returns the main argument parser.
"""
return self.parser
def parse_args(self, argv: [str]):
"""
Parse command line arguments and fill corresponding
options.
:param argv: Vector of command line arguments.
"""
self._runtime_arguments = argv
# Split command line into sub-command lines.
subcommand_argvs = self._split_subcommand_argvs(argv)
parsed = []
# Parse each sub-command line.
for subcommand_argv in subcommand_argvs:
subcommand = subcommand_argv[0] if subcommand_argv else None
parser = self.parser
if subcommand in self.sub_commands.choices:
self.sub_commands_specified.append(subcommand)
parser = self.sub_commands.choices[subcommand]
subcommand_argv = subcommand_argv[1:]
parsed.append(parser.parse_args(subcommand_argv))
# Set corresponding config options.
for namespace in parsed:
for var, val in vars(namespace).items():
self.set_path(var, val, create=True)
def subcommand_arguments(self, subcommand: str,
                         argv: Optional[List[str]] = None) -> List[str]:
    """
    Get list of arguments for given subcommand.

    :param subcommand: Sub-command name to look up.
    :param argv: Optional argument vector to use. Set to None
        to use current runtime arguments.
    :return: Arguments following the sub-command name, or an empty
        list when the sub-command is absent.
    """
    subcommand_argvs = {
        commands[0]: commands[1:]
        for commands in self._split_subcommand_argvs(argv or self._runtime_arguments)
        if commands
    }
    # BUG FIX: the lookup result was previously computed but never
    # returned, so the method always returned None.
    return subcommand_argvs.get(subcommand, [])
def subcommand_arguments_equal(self, argv1: [str], argv2: [str],
                               subcommand: Optional[str]):
    """
    Check whether two argument vectors specify the same options
    for a given sub-command.

    :param argv1: First argument vector.
    :param argv2: Second argument vector.
    :param subcommand: Sub-command to compare. When None, the whole
        vectors are compared instead.
    :return: True when both vectors agree.
    """
    if subcommand is None:
        return argv1 == argv2

    def to_mapping(argv):
        # Map each sub-command name to the arguments that follow it.
        return {
            commands[0]: commands[1:]
            for commands in self._split_subcommand_argvs(argv)
            if commands
        }

    mapping1 = to_mapping(argv1)
    mapping2 = to_mapping(argv2)
    if subcommand not in mapping1 or subcommand not in mapping2:
        return False
    return mapping1[subcommand] == mapping2[subcommand]
def _split_subcommand_argvs(self, argv: [str]) -> [[str]]:
    """
    Partition an argument vector into per-sub-command vectors.

    :param argv: Complete command line argument vector.
    :return: One argument vector per sub-command found.
    """
    known_names = [name for name in self.sub_commands.choices.keys()]
    return split_with_keywords(argv, known_names)
T = TypeVar("T")
def get_instance(self, cls: T) -> T:
    """
    Look up the main registered instance of the given class.

    :param cls: Registered configurable class to look up.
    :raises RuntimeError: When the class was never registered.
    :return: The instance stored under "<command path>.instance".
    """
    # BUG FIX: the f-prefix on the plain string "COMMAND_PATH" was
    # spurious; also use idiomatic indexing instead of __getitem__.
    if not hasattr(cls, "COMMAND_PATH"):
        raise RuntimeError(f"Unable to get instance of unregistered class "
                           f"{cls.__name__}, did you forget to register_config()?")
    return self[cls.COMMAND_PATH + ".instance"]
class ConfigTemplate:
    """
    Helper class used for wrapping configuration parameters.

    Managed parameters live in the ``managed_parameters`` dict, which
    maps a parameter name to its current value (None when unset).
    """

    def __init__(self):
        """Start with an empty set of managed parameters."""
        # BUG FIX: managed_parameters was never initialized anywhere, so
        # the first attribute access on a fresh instance recursed
        # infinitely through __getattr__ (self.managed_parameters is
        # itself missing, triggering __getattr__ again).
        self.managed_parameters = {}

    def copy(self) -> "ConfigTemplate":
        """
        Create a copy of this config.

        :return: Returns the new copy (parameter dict is shallow-copied).
        """
        result = ConfigTemplate()
        result.managed_parameters = self.managed_parameters.copy()
        return result

    def clear_parameter_values(self):
        """
        Clear only parameter values, not the list of managed
        parameters. All parameters will be set to None.
        """
        for param_name in self.managed_parameters:
            self.managed_parameters[param_name] = None

    def clear_parameters(self):
        """
        Clear all managed parameters and their values.
        """
        self.managed_parameters = {}

    def add_parameter(self,
                      param_name: str):
        """
        Add a new managed parameter to this config.

        :param param_name: Name of the parameter.
        :raises AttributeError: Raised when parameter
            with given name already exists.
        """
        if param_name in self.managed_parameters:
            raise AttributeError("Given parameter already exists!")
        self.managed_parameters[param_name] = None

    def set_parameters_from_config(self,
                                   config: "Config",
                                   var_getter: Optional[object] = None):
        """
        Get parameter values from given config.

        :param config: Config to get the values from.
        :param var_getter: Optional object with var_path
            method, which returns path to variable within
            the config.
        """
        for param_name in self.managed_parameters:
            config_name = param_name
            if var_getter is not None:
                config_name = var_getter.var_path(param_name)
            try:
                param_value = config.get_path(
                    config_name,
                    create=False,
                    none_when_missing=False
                )
                self.managed_parameters[param_name] = param_value
            except KeyError:
                # Missing parameter -> keep the current value.
                pass

    def serialize(self) -> bytes:
        """
        Serialize all of the parameters.

        :return: Returns bytes representing the parameters.
        """
        return pk.dumps(self.managed_parameters)

    def deserialize(self, serialized: bytes):
        """
        Deserialize all of the parameters.

        :param serialized: Serialized bytes containing the parameters.
        """
        # NOTE(review): pickle.loads must only be fed trusted data.
        self.managed_parameters = pk.loads(serialized)

    def __getattr__(self,
                    param_name: str):
        """
        Lookup parameter within this config.

        :param param_name: Name of the managed parameter.
        :raises AttributeError: Raised when parameter
            with given name does not exist.
        :return: Value of the parameter.
        """
        # Guard against recursion when managed_parameters itself is
        # missing (e.g. during unpickling, before __init__ ran).
        if param_name == "managed_parameters":
            raise AttributeError(param_name)
        if param_name not in self.managed_parameters:
            raise AttributeError("Given parameter ({}) does NOT exists!".format(param_name))
        return self.managed_parameters[param_name]
class ConfigurableMeta(type):
    """
    Meta-class which makes the classes that use it configurable.

    Inspired by: https://stackoverflow.com/a/50731615 .
    """
class Configurable(object):
"""
Inheritable helper, which allows any class to
become configurable.
"""
@classmethod
def register_options(cls, parser: Config.Parser):
    """ Dummy version of register options which should be overridden. """
    # Intentional no-op: subclasses override this to add their options.
    pass
@classmethod
def _add_config_parameter(cls, var_name: str) -> str:
    """ Add given option to the configuration template and return full name. """
    # Record the parameter on the class-level template...
    ct = cls._get_class_config_template()
    ct.add_parameter(var_name)
    # ...and hand back its fully qualified config path.
    return cls.var_path_name(cls.COMMAND_PATH, var_name)
@classmethod
def _get_class_config_template(cls) -> ConfigTemplate:
    """ Return the per-class configuration template, failing loudly if absent. """
    # Explicitly mangled attribute name, as set by _initialize_class().
    mangled_name = f"_{cls.__name__}__ct"
    if not hasattr(cls, mangled_name):
        raise RuntimeError(f"No configuration template found for class "
                           f"{cls.__name__}, did you forget to register_config()?")
    return getattr(cls, mangled_name)
@classmethod
def _initialize_class(cls):
    """ Initialize this class with required members. """
    # Command name defaults to the class name itself.
    if not hasattr(cls, "COMMAND_NAME"):
        cls.COMMAND_NAME = cls.__name__
    # Config path defaults to the lower-cased command name.
    if not hasattr(cls, "COMMAND_PATH"):
        cls.COMMAND_PATH = cls.COMMAND_NAME.lower()
    # Explicit name mangling...
    # Set default configuration template.
    setattr(cls, f"_{cls.__name__}__ct", ConfigTemplate())
@classmethod
def register_config(cls, config: Config):
    """ Hook this class's options into the given Config as a sub-command. """
    # Make sure COMMAND_NAME/COMMAND_PATH and the template exist first.
    cls._initialize_class()
    sub_parser = config.add_subcommand(cls.COMMAND_NAME)
    cls.register_options(sub_parser)
@classmethod
def register_model(cls, config: Config):
    """ Hook this model class's options into the given Config. """
    # Make sure COMMAND_NAME/COMMAND_PATH and the template exist first.
    cls._initialize_class()
    model_parser = config.add_model(cls.COMMAND_NAME, cls)
    cls.register_options(model_parser)
@classmethod
def var_path_name(cls, command_path: str, var_name: str):
    """
    Build the fully qualified configuration path of a variable.

    :param command_path: Path of the owning command.
    :param var_name: Name / path of the variable.
    :return: Dotted, fully qualified variable name.
    """
    return "{}.{}".format(command_path, var_name)
def serialize_config(self) -> dict:
    """ Pack this object's configuration into a plain dictionary. """
    serialized = self.c.serialize()
    return {"config_data": serialized}
def deserialize_config(self, cfg: dict):
    """ Restore this object's configuration from the given dictionary. """
    payload = cfg["config_data"]
    self.c.deserialize(payload)
def _set_instance(self):
""" Use the self instance as the main instance for this class. """
self.config[self.var_path("instance")] = self
T = TypeVar("T")
def get_instance(self, cls: T) -> T:
    """
    Look up the main registered instance of the given class.

    :param cls: Registered configurable class to look up.
    :raises RuntimeError: When the class was never registered.
    :return: The instance stored under "<command path>.instance".
    """
    # BUG FIX: the f-prefix on the plain string "COMMAND_PATH" was spurious.
    if not hasattr(cls, "COMMAND_PATH"):
        raise RuntimeError(f"Unable to get instance of unregistered class "
                           f"{cls.__name__}, did you forget to register_config()?")
    return self.config[self.var_path_name(cls.COMMAND_PATH, "instance")]
def var_path(self, var_name: str) -> str:
    """
    Build the fully qualified configuration path of a variable
    owned by this object.

    :param var_name: Name / path of the variable.
    :return: Fully qualified variable name.
    """
    command_path = self.COMMAND_PATH
    return self.var_path_name(command_path, var_name)
def get_var(self, var_name: str) -> Any:
    """
    Fetch a configuration value by its short name.

    :param var_name: Variable name / path relative to this object
        (same value accepted by var_path).
    :return: The stored value.
    """
    full_name = self.var_path(var_name)
    return self.config[full_name]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
38825,
11012,
8398,
13,
198,
37811,
198,
198,
11748,
1822,
29572,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
2298,
293,
355,
279,
74,
198,
6... | 2.370181 | 5,862 |
# Demo: fit y = 0.5*x^2 + x + 2 + noise, comparing plain linear
# regression against polynomial-feature regression.
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
# data length
m = 100
# param: [0.5, 1, 2]
X = 6 * np.random.rand(m, 1) - 3
noise = np.random.randn(m, 1)
# polynomial
y = 0.5 * X**2 + X + 2 + noise
# true param to be estimated
coeff = [0.5, 1, 2]
# tmp = X*X
# tmp[0]
# X[0] * X[0]
# Design matrix with bias, linear and quadratic columns.
XX = np.c_[np.ones(m), X, X*X]
# (100, 3)
# XX.shape
# Noise-free target computed directly from the true coefficients.
yy = np.dot(XX, coeff)
#------------------------------------------------------------
# linear model
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
# fit the training data
lin_reg.fit(X, yy)
lin_reg.coef_, lin_reg.intercept_,
# test with the test data
from sklearn.metrics import mean_absolute_error
# make a test data set
X_test = 6 * np.random.rand(m, 1) - 3
y_test = 0.5 * X_test**2 + X_test + 2 + np.random.randn(m, 1)
y_pred = lin_reg.predict(X_test)
lin_mae = mean_absolute_error(y_test, y_pred)
# 3.20
lin_mae
#------------------------------------------------------------
# Visualize the (underfitting) linear fit on a dense grid.
X_new=np.linspace(-3, 3, 100).reshape(100, 1)
# predict Y using linear regression
y_new = lin_reg.predict(X_new)
plt.plot(X_test, y_test, "b.")
plt.plot(X_new, y_new, "r-", linewidth=2, label="Predictions")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.show()
#------------------------------------------------------------
from sklearn.preprocessing import PolynomialFeatures
# degree is a param
poly_features = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly_features.fit_transform(X)
# fit after poly transformation
lin_reg = LinearRegression()
lin_reg.fit(X_poly, y)
lin_reg.coef_, lin_reg.intercept_
X_poly_test = poly_features.fit_transform(X_test)
y_pred = lin_reg.predict(X_poly_test)
lin_mae = mean_absolute_error(y_test, y_pred)
# 0.809
lin_mae
#------------------------------------------------------------
# testing data set
#------------------------------------------------------------
# Predict beyond the training range [-3, 3] to show extrapolation.
lower = -3
upper = 6
X_new=np.linspace(lower, upper, 100).reshape(100, 1)
# predict Y using linear regression
X_poly_new = poly_features.fit_transform(X_new)
y_new = lin_reg.predict(X_poly_new)
plt.plot(X_test, y_test, "b.")
plt.plot(X_new, y_new, "r-", linewidth=2, label="Predictions")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.show()
#------------------------------------------------------------
# degress as a param
# overfitted is severe outside the range
lower = -3
upper = 6
X_new=np.linspace(lower, upper, 100).reshape(100, 1)
degrees = range(1, 8)
for deg in degrees:
    poly_features = PolynomialFeatures(degree=deg, include_bias=False)
    # NOTE(review): this transforms X_new but then fits against y, which
    # was generated from X — the pairing looks wrong; presumably this
    # should be fit_transform(X). Confirm before relying on the MAEs.
    X_poly = poly_features.fit_transform(X_new)
    # fit after poly transformation
    lin_reg = LinearRegression()
    lin_reg.fit(X_poly, y)
    X_poly_test = poly_features.fit_transform(X_test)
    y_pred = lin_reg.predict(X_poly_test)
    lin_mae = mean_absolute_error(y_test, y_pred)
    print(lin_mae)
#------------------------------------------------------------
# plot degree = 8
# NOTE(review): plot_poly is not defined in this excerpt — presumably
# defined elsewhere in the full file; confirm.
plot_poly(2)
plot_poly(3)
plot_poly(4)
plot_poly(5)
11748,
299,
32152,
355,
45941,
201,
198,
11748,
299,
32152,
13,
25120,
355,
374,
358,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
201,
198,
2,
1366,
4129,
201,
198,
76,
796,
1802,
201,
198,
2,
5772,
... | 2.496201 | 1,316 |
# -*- coding: utf-8 -*-
array = [1,2,3,4,5,6]
# NOTE(review): Python 2 print statements; the helpers sum_1, add and
# big are not defined in this excerpt — presumably defined in the full
# file; confirm before running.
# print map(sum_1, array)
print reduce(add, array)
print filter(big, array)
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
18747,
796,
685,
16,
11,
17,
11,
18,
11,
19,
11,
20,
11,
21,
60,
198,
198,
2,
3601,
3975,
7,
16345,
62,
16,
11,
7177,
8,
628,
198,
4798,
4646,
7,
2860,
11,
717... | 2.118644 | 59 |
# -*- coding: utf-8 -*-
from .....Functions.Plot.plot_A_fft2 import plot_A_fft2 as plot_A_fft2_fct
def plot_A_fft2(
    self,
    Data_str,
    is_phase=False,
    is_deg=True,
    is_elecorder=False,
    is_spaceorder=False,
    freq_max=20000,
    r_max=100,
    mag_max=None,
    is_norm=False,
    unit="SI",
    colormap=None,
    save_path=None,
):
    """2D color plot of the 2D Fourier Transform of a field

    Parameters
    ----------
    self : Output
        an Output object
    Data_str : str
        name of the Data Object to plot (e.g. "mag.Br")
    is_phase : bool
        boolean indicating if the phase must be plot (subplot)
    is_deg : bool
        boolean indicating if the phase must be converted to degrees
    is_elecorder : bool
        boolean indicating if we want to use the electrical order for the fft axis
    is_spaceorder : bool
        boolean indicating if we want to use the spatial order for the fft axis
    freq_max : int
        maximum value of the frequency for the fft axis
    r_max : int
        maximum value of the wavenumber for the fft axis
    mag_max : float
        maximum value of the magnitude for the plot (None for automatic
        scaling) — presumably forwarded unchanged to plot_A_fft2_fct;
        TODO confirm its exact semantics there
    is_norm : bool
        boolean indicating if the field must be normalized
    unit : str
        unit in which to plot the field
    colormap : colormap object
        colormap prescribed by user
    save_path : str
        path and name of the png file to save
    """
    # Get Data object names
    # e.g. "mag.Br" -> phys = self.mag, data = self.mag.Br
    phys = getattr(self, Data_str.split(".")[0])
    data = getattr(phys, Data_str.split(".")[1])

    # Call the plot function (pure delegation; all options forwarded)
    plot_A_fft2_fct(
        data,
        is_phase=is_phase,
        is_deg=is_deg,
        is_elecorder=is_elecorder,
        is_spaceorder=is_spaceorder,
        freq_max=freq_max,
        r_max=r_max,
        mag_max=mag_max,
        is_norm=is_norm,
        unit=unit,
        colormap=colormap,
        save_path=save_path,
    )
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11485,
986,
24629,
2733,
13,
43328,
13,
29487,
62,
32,
62,
487,
83,
17,
1330,
7110,
62,
32,
62,
487,
83,
17,
355,
7110,
62,
32,
62,
487,
83,
17,
62,
6... | 2.25393 | 827 |
"""
Define the API functions
"""
from time import time
from fastapi import status, Query
from fastapi.responses import JSONResponse
from api import m_f
from api.lookup import m_entity_search
from api_f import app, api_info
from api_f.f_models import *
@app.get("/", tags=["introduction"])
@app.get(
"/entity_info/{wikidata_id}",
tags=["get_entity_info"],
response_model=ItemInfo,
responses={
404: {"description": "The item was not found"},
200: {"description": "Item requested by Wikidata ID, example: Q1490"},
},
response_model_exclude_none=True,
)
@app.get(
"/entity_search/",
tags=["entity_search"],
response_model=SearchOutput,
response_model_exclude_none=True,
responses={
400: {
"description": "Bad request. Please enter at least one of q (query), attr (attribute) parameter."
},
},
)
@app.get("/mtab/", tags=["table_annotation"])
| [
37811,
198,
7469,
500,
262,
7824,
5499,
198,
37811,
198,
6738,
640,
1330,
640,
198,
198,
6738,
3049,
15042,
1330,
3722,
11,
43301,
198,
6738,
3049,
15042,
13,
16733,
274,
1330,
19449,
31077,
198,
6738,
40391,
1330,
285,
62,
69,
198,
6... | 2.608333 | 360 |
import logging
from d2ix import RawData
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
6738,
288,
17,
844,
1330,
16089,
6601,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628
] | 2.962963 | 27 |
# everything specific to an environment

# Presumably credentials for the legacy talks server; None means "not
# configured" and is expected to be overridden per deployment — confirm.
OLD_TALKS_SERVER = None
OLD_TALKS_USER = None
OLD_TALKS_PASSWORD = None

# default values for HTTP API calls
API_OX_PLACES_URL = 'https://api.m.ox.ac.uk/places/'
API_OX_DATES_URL = 'https://api.m.ox.ac.uk/dates/'
TOPICS_URL = 'https://talks-dev.oucs.ox.ac.uk/topics/'
2,
2279,
2176,
284,
281,
2858,
198,
198,
15173,
62,
51,
1847,
27015,
62,
35009,
5959,
796,
6045,
198,
15173,
62,
51,
1847,
27015,
62,
29904,
796,
6045,
198,
15173,
62,
51,
1847,
27015,
62,
47924,
54,
12532,
796,
6045,
198,
198,
2,
... | 2.376923 | 130 |
""" db_util 180903_2140
180809_1100
171215_1150
"""
import logging
import copy
from tittles import mod
_t = mod.Mod("tittles.tittles")
_dbc = mod.Mod("dbcore") # pylint: disable-msg=C0103
if __name__ == "__main__":
logging.basicConfig(format="%(levelname)s: %(module)s:%(lineno)d(%(funcName)s) %(message)s", level=logging.DEBUG)
_log = logging.getLogger(__name__) # pylint: disable-msg=C0103
DB_ERR_TABLE_DOES_NOT_EXIST = "42P01"
__DB_CLASS_KIND_TABLE = "'r', ''"
# TODO update list_constraints function to get source from pg_get_constraintdef
def db_table_ddl(conn, table_name, table_cols, table_seqs, table_cons, **kwargs):
    """ Generate create table DDL

        :param conn: database connection, used only when kwargs["apply"] is truthy
        :param table_name: name of the table to create
        :param table_cols: list of column dicts ("col_name", "col_type", optional "not_null")
        :param table_seqs: list of sequence dicts ("seq_name", "col_name"); may be empty/None
        :param table_cons: list of constraint dicts ("con_type", "con_name", "con_src"); may be empty/None
        :param kwargs: "table_prefix" - optional prefix for the table name,
                       "apply" - when truthy, execute the statement on conn
        :return: list containing the single generated DDL statement

        NOTE(review): columns referenced by a sequence are mutated in
        place (col_type forced to "serial") — confirm callers do not
        reuse table_cols afterwards.
    """
    # Sequences
    if table_seqs:
        for s_ in table_seqs:
            # Find the column this sequence is attached to.
            c_ = _t.m.daffkv(table_cols, "col_name", s_["col_name"])
            if c_:
                c_["is_seq"] = True
                c_["col_type"] = "serial"
            else:
                raise _t.m.DbIntgrError("Sequence '%s' not related to any table '%s' column" % (s_["seq_name"], table_name))
    # Columns
    cols_ = []
    for c_ in table_cols:
        cols_.append("%s %s%s" % (c_["col_name"], c_["col_type"], c_.get("not_null") and " NOT NULL" or ""))
    # Constraints
    cons_ = []
    if table_cons:
        for c_ in table_cons:
            # Only check constraints ("c") are emitted here.
            if c_["con_type"] == "c":
                cons_.append("CONSTRAINT %s %s" % (c_["con_name"], c_["con_src"]))
    # Table prefix
    table_pfx_ = kwargs.get("table_prefix", "")
    # Construct DDL statement
    stmt_ = "CREATE TABLE %s%s (%s%s)" % (table_pfx_, table_name, ", ".join(cols_), cons_ and ", %s" % ", ".join(cons_) or "")
    if kwargs.get("apply"): conn.execute(stmt_, **kwargs)
    return [stmt_, ]
if __name__ == "__main__": __test()
| [
37811,
20613,
62,
22602,
1248,
2931,
3070,
62,
17,
15187,
198,
220,
220,
220,
220,
220,
220,
220,
11546,
34583,
62,
42060,
198,
220,
220,
220,
220,
220,
220,
220,
1596,
1065,
1314,
62,
1157,
1120,
198,
37811,
198,
11748,
18931,
198,
... | 2.088812 | 867 |
# -*- coding:utf-8 -*-

from distutils.core import setup

# Packaging metadata for the "thenextquant" trading framework.
# NOTE(review): distutils is deprecated in modern Python — consider
# migrating to setuptools when this file is next touched.
setup(
    name="thenextquant",
    version="0.0.6",
    packages=["quant",
              "quant.utils",
              "quant.platform",
              ],
    description="Quant Trader Framework",
    url="https://github.com/TheNextQuant/thenextquant",
    author="huangtao",
    author_email="huangtao@ifclover.com",
    license="MIT",
    keywords=["thenextquant", "quant"],
    # Pinned runtime dependencies.
    install_requires=[
        "aiohttp==3.2.1",
        "aioamqp==0.10.0",
    ],
)
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
1169,
19545,
40972,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
15,
13... | 2.105263 | 247 |
#!/usr/bin/env python

from pwn import *

# Remote host/port serving the vulnerable binary.
SERVER = "mustard.stt.rnl.tecnico.ulisboa.pt"
PORT = 10093

# The challenge binary is 32-bit Linux.
context.arch = "i386"
context.os = "linux"

e = ELF("bin")
# Address of the global `target` symbol to be overwritten.
SYM = e.symbols["target"]
PTR = p32(SYM)

# Format-string write: pad the printed character count to PADD, then
# "%<POS>$n" stores that count into the pointer placed at the start of
# the payload (stack argument position POS).
PADD = 64
POS = 7

s = remote(SERVER, PORT)
# NOTE(review): concatenating p32() output with str works on Python 2
# pwntools only — confirm this script targets Python 2.
s.sendline(PTR + "%{}x".format(PADD) + "%{}$n".format(POS))
print(s.recvuntil("}"))
s.close()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
279,
675,
1330,
1635,
198,
198,
35009,
5959,
796,
366,
27238,
446,
13,
301,
83,
13,
81,
21283,
13,
660,
31522,
3713,
13,
377,
271,
48614,
13,
457,
1,
198,
15490,
796,
1802,
6... | 2.098765 | 162 |
from classes.cell_trace_config import CellTraceConfig, cell_trace_config_filter
import logging
import numpy as np
from scipy.stats import mannwhitneyu
| [
6738,
6097,
13,
3846,
62,
40546,
62,
11250,
1330,
12440,
2898,
558,
16934,
11,
2685,
62,
40546,
62,
11250,
62,
24455,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
34242,
1330,
582,
77,
1929,
270,... | 3.319149 | 47 |
from microbit import *
wheels=[
{
'forward': [9,14,4,18,10,15,6,-2,16,7,-9,-7,1,-6,11,2,-13,-7,-18,-1,3,-10,-14,-21,-5,-3],
'reverse': [18,9,21,13,7,2,-4,6,14,-9,7,10,-6,-1,-10,-14,-7,-2,1,5,-15,-18,3,-3,-16,-11],
'offset': 0
},
{
'forward': [13,24,7,4,2,12,-4,16,4,15,8,11,-11,1,6,-10,-16,-9,3,-8,-5,-17,-12,-7,-21,-6],
'reverse': [16,11,4,21,17,10,-2,-4,9,-7,12,8,-4,-13,-1,5,7,-12,-8,6,-6,-3,-11,-16,-15,-24],
'offset': 0
},
{
'forward': [5,9,14,4,15,6,17,7,-6,-8,-1,7,3,-10,11,2,-16,-5,-14,3,-7,-13,-2,1,-18,-4],
'reverse': [16,8,6,10,14,-5,18,-4,13,1,-9,-6,5,7,-7,-3,-14,-2,-7,-15,2,4,-3,-17,-1,-11],
'offset': 0
}
]
reflector = [4,12,8,13,-4,15,18,15,1,-1,-8,3,3,-12,-3,-3,-13,6,7,2,-15,-2,-15,-6,-18,-7]
pinboard = [23,23,5,22,1,-1,6,-5,7,0,0,0,-6,3,3,-7,-3,-3,4,0,0,0,-4,-23,-23,-22]
pins = [pin12,pin13,pin14,pin15,pin16]
val = 0
char = ""
while True:
tval = 0
for i in range(5):
tval *= 2
if pins[i].read_digital():
tval += 1
if tval == 0:
val = 0
val |= tval
if val > 0:
char = chr(val+64)
display.show(char)
else:
if char != "":
display.clear()
sleep(500)
display.show(barnaby(char))
char = ""
#while True:
# if uart.any():
# data = uart.readall()
# char = chr(int.from_bytes(data,'big'))
# display.show(barnaby(char))
| [
6738,
4580,
2545,
1330,
1635,
198,
198,
12491,
1424,
41888,
198,
90,
198,
220,
705,
11813,
10354,
685,
24,
11,
1415,
11,
19,
11,
1507,
11,
940,
11,
1314,
11,
21,
12095,
17,
11,
1433,
11,
22,
12095,
24,
12095,
22,
11,
16,
12095,
... | 1.673759 | 846 |
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
import sys
from CashFlows import CashFlowGroup
import _utils as hutils
raven_path = hutils.get_raven_loc()
sys.path.append(raven_path)
class CashFlowUser:
    """
    Mixin-style base for objects that need access to CashFlow functionality.
    An inheriting entity (e.g. a "component") gains an "economics" xml input
    node and owns the CashFlowGroup holding its associated cash flows.
    Inheritors should call these methods from their own corresponding methods
    in nearly all cases.
    """

    @classmethod
    def get_input_specs(cls, spec):
        """
          Attach the cash flow input specifications to the given spec.
          Must be called as part of the inheriting class's spec definition.
          @ In, spec, InputData, specifications that need cash flow added to it
          @ Out, spec, InputData, the same specs with the economics node added
        """
        # This unit probably has some economics attached.
        spec.addSub(CashFlowGroup.get_input_specs())
        return spec

    def __init__(self):
        """
          Constructor.
          @ In, None
          @ Out, None
        """
        # Populated by read_input with this user's CashFlowGroup.
        self._economics = None

    def read_input(self, specs):
        """
          Load economics settings from the user's input file.
          @ In, specs, InputData params, input from user
          @ Out, None
        """
        economics = CashFlowGroup(self)
        self._economics = economics
        economics.read_input(specs)

    def get_crossrefs(self):
        """
          Gather the value entities this component requires to function.
          @ In, None
          @ Out, crossrefs, dict, mapping of dictionaries with information about the entities required.
        """
        return self._economics.get_crossrefs()

    def set_crossrefs(self, refs):
        """
          Wire cross-referenced material from other entities into this
          component's ValuedParams.
          @ In, refs, dict, dictionary of entity information
          @ Out, None
        """
        self._economics.set_crossrefs(refs)

    def get_incremental_cost(self, activity, raven_vars, meta, t):
        """
          Evaluate the cost of a particular set of activities.
          @ In, activity, pandas.Series, scenario variable values to evaluate cost of
          @ In, raven_vars, dict, additional variables (presumably from raven) that might be needed
          @ In, meta, dict, further dictionary of information that might be needed
          @ In, t, int, time step at which cost needs to be evaluated
          @ Out, cost, float, cost of activity
        """
        # Pure delegation to the owned CashFlowGroup.
        return self._economics.incremental_cost(activity, raven_vars, meta, t)

    def get_economics(self):
        """
          Accessor for the economics group.
          @ In, None
          @ Out, econ, CashFlowGroup, cash flows for this cash flow user
        """
        return self._economics
| [
198,
2,
15069,
12131,
11,
12350,
13485,
6682,
10302,
11,
11419,
198,
2,
11096,
371,
34874,
15731,
1137,
53,
1961,
198,
11748,
25064,
198,
6738,
16210,
7414,
1666,
1330,
16210,
37535,
13247,
198,
11748,
4808,
26791,
355,
289,
26791,
198,
... | 2.998899 | 908 |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from .chart import Chart
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
76,
29487,
18,
67,
1330,
12176,
274,
18,
35,
198,
6738,
2603,
29487,
8019,
1330,
12067,
198,
6... | 2.958333 | 48 |
#!/usr/bin/env python3
"""
gps_manage.py
Script to control donkey car with GPS navigation. Waypoints are set with GPS coordinates in degrees.
Call: gps_manage.py -drive
"""
# import GPS Planner and other DK parts
import donkeycar as dk
from gps_parts.gps import GPS
from gps_parts.planner import Planner
from donkeycar.vehicle import Vehicle
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
# other important modules
import serial
import pynmea2
import time
import threading
def drive(cfg, goalLocation):
    """
    drive(cfg, goalLocation)

    Add GPS, Planner, and actuator parts and call DK Vehicle.py to run car.

    @param: cfg - configuration file from dk calibration
            goalLocation - list of GPS coordinates in degrees
    @return: None

    NOTE(review): this docstring says waypoints are in degrees while the
    GPS part is described below as outputting radians — confirm the
    units are consistent between GPS and Planner.
    """
    # initialize vehicle
    V = Vehicle()

    # GPS is a DK part that will poll GPS data from serial port
    # and output current location in radians.
    gps = GPS(cfg.BAUD_RATE, cfg.PORT, cfg.TIMEOUT)

    # Planner is a DK part that calculates control signals to actuators based on current location
    # from GPS
    planner = Planner(goalLocation=goalLocation, steer_gain=cfg.STEERING_P_GAIN,
                      throttle_gain=cfg.THROTTLE_P_GAIN)

    # Actuators: steering and throttle (PWM via the PCA9685 driver board)
    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    # add threaded part for gps controller (polls in the background)
    V.add(gps, outputs=["currLocation", "prevLocation"], threaded=True)

    # add planner, actuator parts; planner outputs feed the actuators
    V.add(planner, inputs=["currLocation", "prevLocation"], outputs=["steer_cmd", "throttle_cmd"])
    V.add(steering, inputs=['steer_cmd'])
    V.add(throttle, inputs=['throttle_cmd'])

    # Blocking call: runs the vehicle loop until interrupted.
    V.start()
if __name__ == '__main__':
    # goalLocation is a list of lists: each sublist a waypoint for the controller.
    goalLocation = [[32.8811271,-117.2342783], [32.8812414, -117.2374792]]
    # NOTE(review): the two assignments below overwrite the previous ones,
    # so only the final single-waypoint list is actually used — confirm
    # the earlier lists are intentionally-kept leftovers from testing.
    goalLocation = [[32.881322,-117.235454], [32.881162,-117.235459]]
    goalLocation = [[32.881018, -117.235807]]

    cfg = dk.load_config()
    drive(cfg, goalLocation)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
70,
862,
62,
805,
496,
13,
9078,
198,
198,
7391,
284,
1630,
50085,
1097,
351,
15472,
16408,
13,
6378,
13033,
389,
900,
351,
15472,
22715,
287,
7370,
13,
198,
198,
141... | 2.406481 | 1,080 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" RLBook.Utils
- NeuralNetwork Base Class definition
"""
import logging
from abc import abstractclassmethod, ABCMeta
from keras import backend as K
from keras.losses import mean_squared_error
from keras.models import model_from_json
class NeuralNet:
    """ This class specifies the base NeuralNet class.

        Subclasses implement train/predict; this base provides Keras
        model check-pointing to/from JSON (+ HDF5 weight) files.
    """
    # NOTE(review): "__metaclass__" is Python-2 syntax and has NO effect
    # under Python 3, so abstractness is not actually enforced. Kept
    # as-is for backward compatibility: switching to `metaclass=ABCMeta`
    # would break any caller that instantiates NeuralNet directly.
    __metaclass__ = ABCMeta

    def __init__(self, check_point=0):
        """ Initialise a NeuralNetwork

        :param check_point: Counter that will be used when save the NN
        """
        self.CHECK_POINT = check_point
        # Underlying Keras model; populated by subclasses or load_checkpoint.
        self.model = None

    @abstractclassmethod
    def train(self, examples):
        """ This function trains the neural network with examples obtained from self-play.

        :param examples: a list of training examples, where each example is of form
                         (board, pi, v). pi is the MCTS informed policy vector for
                         the given board, and v is its value. The examples has
                         board in its canonical form.
        """
        pass

    @abstractclassmethod
    def predict(self, board):
        """ Predict given a board state

        :param board: Current board in its canonical form.
        :returns: pi: A list of (action, pi) tuples
                  v: a float in [-1,1] that gives the value of the current board
        """
        pass

    def save_checkpoint(self, filename: str):
        """ Saves the current neural network (with its parameters) into a given filename
        """
        # serialize model to JSON
        model_json = self.model.to_json()
        with open('{}.json'.format(filename), "w") as json_file:
            json_file.write(model_json)

        # Serialize weights to HDF5
        self.model.save_weights("{}.h5".format(filename))
        logging.info("Model has been check-pointed: {}".format(filename))

    def load_checkpoint(self, filename):
        """ Loads parameters of the neural network from a given filename
        """
        with open('{}.json'.format(filename), 'r') as json_file:
            loaded_model_json = json_file.read()
        self.model = model_from_json(loaded_model_json)

        # load weights into new model
        self.model.load_weights("{}.h5".format(filename))
        logging.info("Model has been loaded from a check-pointed: {}".format(filename))

    # BUG FIX: the class previously ended with dangling "@property" and
    # "@increment.setter" decorators that had no function attached —
    # a SyntaxError — so they have been removed.
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
45715,
10482,
13,
18274,
4487,
198,
198,
12,
47986,
26245,
7308,
5016,
6770,
198,
198,
37811,
198,
11748,
18931,
... | 2.461994 | 1,013 |
#!/usr/bin/env python3

#   Provide `os.walk(1)`.
import os

#   Provide `sys.exit(1)`.
import sys

# Process exit code reported on success.
STATUS_SUCCESS = 0

SOURCES_BASE = '../../src/' # TODO Stop relying on this.

if __name__ == '__main__':
    # NOTE(review): run() is not defined in this excerpt — presumably
    # defined elsewhere in the full file; confirm before executing.
    run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
220,
220,
44290,
4600,
418,
13,
11152,
7,
16,
8,
44646,
198,
11748,
28686,
198,
198,
2,
220,
220,
44290,
4600,
17597,
13,
37023,
7,
16,
8,
44646,
198,
11748,
25064,
628... | 2.23 | 100 |
# Scrapy settings for the dragon_talon project.
#
# For the full list of available settings and their documentation see:
#     https://docs.scrapy.org/en/latest/topics/settings.html

BOT_NAME = "dragon_talon"

SPIDER_MODULES = ["dragon_talon.spiders"]
NEWSPIDER_MODULE = "dragon_talon.spiders"

LOG_LEVEL = "INFO"

# Run Twisted on top of the asyncio event loop.
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'tutorial (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Crawl politely: a single request in flight at a time (Scrapy default: 16).
CONCURRENT_REQUESTS = 1

# Fixed delay between requests to the same site (default: 0).
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
DOWNLOAD_DELAY = 5
# FIX: this setting was previously misspelled `ONCURRENT_REQUESTS_PER_DOMAIN`,
# so Scrapy silently ignored it and used its default of 8 concurrent requests
# per domain, defeating the polite-crawl settings above.
CONCURRENT_REQUESTS_PER_DOMAIN = 1
CONCURRENT_REQUESTS_PER_IP = 1
# Randomize the delay (0.5x .. 1.5x of DOWNLOAD_DELAY) to look less robotic.
RANDOMIZE_DOWNLOAD_DELAY = True

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#     'tutorial.middlewares.TutorialSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#     'tutorial.middlewares.TutorialDownloaderMiddleware': 543,
# }

# Item pipelines: scraped items are persisted to MongoDB.
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "dragon_talon.pipelines.MongoPipeline": 300,
}

# AutoThrottle is disabled; the fixed delays above are used instead.
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = False
# AUTOTHROTTLE_START_DELAY = 5
# AUTOTHROTTLE_MAX_DELAY = 60
# AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0
# AUTOTHROTTLE_DEBUG = False

# HTTP caching: keep every response on disk for 30 minutes.
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 30 * 60
HTTPCACHE_DIR = "httpcache"
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Spidermon is a framework to build monitors for Scrapy spiders.
# SPIDERMON_ENABLED = True

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     'spidermon.contrib.scrapy.extensions.Spidermon': 500,
# }
| [
33,
2394,
62,
20608,
796,
366,
14844,
62,
39240,
261,
1,
198,
198,
4303,
41237,
62,
33365,
6239,
1546,
796,
14631,
14844,
62,
39240,
261,
13,
2777,
4157,
8973,
198,
13965,
4303,
41237,
62,
33365,
24212,
796,
366,
14844,
62,
39240,
261... | 2.811483 | 1,045 |
"""Command-name string literals used by the bot."""

# Conversation entry point.
START_COMMAND = "start"
# Chat-history housekeeping.
CLEAR_CHAT_COMMAND = "clearchatwithbot"
# Karma-related commands.
SHOW_KARMA_COMMAND = "showkarma"
SHOW_KARMA_KEYBOARD_COMMAND = "checkchatkarmas"
# Informational commands.
USER_INFO_COMMAND = "userinfo"
CHAT_INFO_COMMAND = "chatinfo"
HISTORY_GRAPH_COMMAND = "historygraph"
37811,
39,
10119,
4731,
4187,
874,
329,
9729,
198,
37811,
198,
2257,
7227,
62,
9858,
44,
6981,
796,
366,
9688,
1,
198,
29931,
1503,
62,
31542,
62,
9858,
44,
6981,
796,
366,
2375,
998,
265,
4480,
13645,
1,
198,
9693,
3913,
62,
42,
... | 2.391667 | 120 |